Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h5
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c61
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.h5
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c142
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c7
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c8
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c6
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_hci.h40
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c36
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c3
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_tx.c160
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_trace.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c18
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c103
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c159
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c105
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c17
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h13
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c230
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c253
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c73
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c687
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c137
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c89
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c172
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c123
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c15
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c23
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h3
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c88
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c74
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c69
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h45
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c51
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c179
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c41
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c53
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c161
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c2009
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h233
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c40
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h2
-rw-r--r--drivers/net/ethernet/marvell/prestera/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h60
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c47
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c28
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c52
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c36
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c256
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h22
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c547
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c706
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c668
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h34
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c30
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c529
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.c722
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c813
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c153
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c579
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c554
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c318
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c408
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c433
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c405
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1298
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c298
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c1072
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c718
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1052
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c812
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c842
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c63
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h26
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c378
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h106
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c55
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h10
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c39
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c18
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h5
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h70
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c64
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c148
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c12
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c8
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig24
-rw-r--r--drivers/net/ethernet/neterion/Makefile1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c10
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c5099
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2086
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c1154
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h48
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4808
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h516
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h4636
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c2428
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h2290
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h49
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c23
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c122
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c153
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c220
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c30
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c30
-rw-r--r--drivers/net/ethernet/sfc/ef100.c70
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c148
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.h9
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c510
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h13
-rw-r--r--drivers/net/ethernet/sfc/ef100_regs.h83
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c435
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h69
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c46
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.c32
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.h2
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c84
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c16
-rw-r--r--drivers/net/ethernet/sfc/efx.c73
-rw-r--r--drivers/net/ethernet/sfc/efx.h9
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c115
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h19
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c22
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c51
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/filter.h18
-rw-r--r--drivers/net/ethernet/sfc/mae.c346
-rw-r--r--drivers/net/ethernet/sfc/mae.h42
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c63
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h5
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h8136
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol_mae.h24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h79
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c8
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h10
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/sriov.c10
-rw-r--r--drivers/net/ethernet/sfc/tc.c252
-rw-r--r--drivers/net/ethernet/sfc/tc.h85
-rw-r--r--drivers/net/ethernet/sfc/tx.c10
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c35
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c737
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c6
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.h2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.h2
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig32
-rw-r--r--drivers/net/ethernet/wangxun/Makefile6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h24
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c166
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h57
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c15
454 files changed, 31438 insertions, 31211 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 955abbc5490e..9a55c1d5a0a1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -84,6 +84,7 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9eb01169957f..c06e75ed4231 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index fbf4588994ac..d19d1579c415 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -1106,7 +1106,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mif_ctrl);
writel(0, &rxmac->space_avail);
- /* Initialize the the mif_ctrl register
+ /* Initialize the mif_ctrl register
* bit 3: Receive code error. One or more nibbles were signaled as
* errors during the reception of the packet. Clear this
* bit in Gigabit, set it in 100Mbit. This was derived
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index b7d772f2dcbb..3c2e32fb7389 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -3,11 +3,12 @@
* Copyright (C) 2014 Altera Corporation. All rights reserved
*/
-#include <linux/kernel.h>
-
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 05ac8d9ccb2f..5d1baa01360f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1830,9 +1830,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->max_mtu = AMD8111E_MAX_MTU;
netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
-#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-#endif
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 895d35639129..c68ace804e37 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -230,7 +230,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
netif_dbg(pdata, drv, netdev,
- "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ "cap=%d, en=%#x, mbc=%d, delay=%d\n",
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
/* Check PFC for supported number of traffic classes */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4d46780fad13..f342bb853189 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1673,12 +1673,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
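
(Aside: the skb_tcp_all_headers()/skb_inner_tcp_all_headers() helpers used here and in many of the conversions below are new in this cycle; judging from the call sites they are thin wrappers over the expressions they replace, presumably along these lines:)

/* Presumed definitions (include/linux/tcp.h): length of all headers up to
 * and including the (inner) TCP header.
 */
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
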
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d9547552ceef..b875c430222e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -417,7 +417,7 @@ struct xgbe_rx_ring_data {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
index b6119dcc3bb9..c2fda80fe1cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -158,7 +158,7 @@ struct aq_mss_egress_class_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! 0: don't care and no LLC header exist.
@@ -422,7 +422,7 @@ struct aq_mss_ingress_preclass_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! Mask is per-byte.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index cac509708e9d..e461f4764066 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -786,7 +786,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
struct ag71xx_ring *ring = &ag->tx_ring;
int sent = 0, bytes_compl = 0, n = 0;
@@ -825,7 +825,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
if (!skb)
continue;
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
ring->buf[i].tx.skb = NULL;
bytes_compl += ring->buf[i].tx.len;
@@ -946,7 +946,7 @@ static unsigned int ag71xx_max_frame_len(unsigned int mtu)
return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
-static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
u32 t;
@@ -970,7 +970,7 @@ static void ag71xx_fast_reset(struct ag71xx *ag)
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- ag71xx_tx_packets(ag, true);
+ ag71xx_tx_packets(ag, true, 0);
reset_control_assert(ag->mac_reset);
usleep_range(10, 20);
@@ -1657,7 +1657,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
@@ -1703,7 +1703,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
int tx_done, rx_done;
u32 status;
- tx_done = ag71xx_tx_packets(ag, false);
+ tx_done = ag71xx_tx_packets(ag, false, limit);
netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
rx_done = ag71xx_rx_packets(ag, limit);
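
(The budget argument threaded through ag71xx_tx_packets() exists so completed skbs can be handed to napi_consume_skb(); a minimal sketch of the pattern follows, where the example_* names are hypothetical and not ag71xx code:)

static int example_tx_clean(struct example_priv *priv, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while ((skb = example_next_completed_skb(priv)) != NULL) {
		/* budget == 0 means "not running in NAPI context" (e.g. the
		 * flush path in ag71xx_fast_reset()); napi_consume_skb()
		 * then falls back to dev_consume_skb_any().  With a non-zero
		 * budget the skb can be returned to the per-CPU NAPI cache
		 * and batch-freed instead.
		 */
		napi_consume_skb(skb, budget);
		done++;
	}
	return done;
}

(napi_build_skb(), used on the RX side above in place of build_skb(), is the matching NAPI-context variant that can draw skbs from the same per-CPU cache.)
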
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 49459397993e..be4b1f8eef29 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2072,7 +2072,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -2107,7 +2107,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2132,7 +2132,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2219,7 +2219,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2733,8 +2734,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx, 64);
for (i = 0; i < adapter->tx_queue_count; ++i)
- netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
- atl1c_clean_tx, 64);
+ netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2849,7 +2850,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
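
(netif_napi_add_tx(), used above in place of netif_napi_add(..., 64), is assumed here to register a TX NAPI with the default poll weight; a sketch of what the wrapper presumably amounts to:)

static inline void netif_napi_add_tx(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int))
{
	/* Assumed: marks the NAPI as TX-only and drops the explicit weight
	 * argument in favour of the default NAPI_POLL_WEIGHT (64).
	 */
	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
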
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 20681860a599..57a51fb7746c 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1609,8 +1609,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
@@ -1645,7 +1644,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
@@ -1713,7 +1712,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
@@ -2482,7 +2482,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 6a969969d221..ff1fe09abf9f 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2115,7 +2115,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
ntohs(iph->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
@@ -2206,7 +2206,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
@@ -2367,8 +2367,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 698438a2ee0f..1c6aea12db72 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -388,7 +388,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_buf_size, DMA_FROM_DEVICE);
priv->rx_buf[desc_idx] = NULL;
- skb = build_skb(buf, priv->rx_frag_size);
+ skb = napi_build_skb(buf, priv->rx_frag_size);
if (unlikely(!skb)) {
skb_free_frag(buf);
dev->stats.rx_dropped++;
@@ -423,7 +423,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/*
* try to or force reclaim of transmitted buffers
*/
-static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
struct bcm_enet_priv *priv;
unsigned int bytes;
@@ -468,7 +468,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
dev->stats.tx_errors++;
bytes += skb->len;
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, budget);
released++;
}
@@ -499,7 +499,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0, budget);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
@@ -1211,7 +1211,7 @@ static int bcm_enet_stop(struct net_device *dev)
bcm_enet_disable_mac(priv);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -1935,7 +1935,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enet_driver = {
+static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
.remove = bcm_enet_remove,
.driver = {
@@ -2362,7 +2362,7 @@ static int bcm_enetsw_stop(struct net_device *dev)
bcm_enet_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -2756,7 +2756,7 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enetsw_driver = {
+static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
.remove = bcm_enetsw_remove,
.driver = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5729a5ab059d..712b5595bc39 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3421,12 +3421,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
@@ -3534,15 +3531,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
@@ -3568,12 +3563,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
/* set FW indication according to inner or outer protocols if tunneled */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index cf9b00576ed3..ba0f1ffac507 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -535,12 +535,9 @@ normal_tx:
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -4480,7 +4477,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
}
}
if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
+ bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
@@ -4499,9 +4496,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -10659,7 +10654,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
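
(The ntp_fltr_bmap change is the usual open-coded-bitmap-to-helper cleanup; a small illustrative sketch, where nbits stands in for BNXT_NTP_FLTR_MAX_FLTR:)

static unsigned long *example_alloc_filter_bitmap(unsigned int nbits)
{
	/* Replaces kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL):
	 * same zeroed storage, but the intent is explicit and the allocation
	 * pairs with bitmap_free() rather than kfree().
	 */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}
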
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 6b3d4f4c2a75..14df8cfc2946 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -20,6 +20,8 @@
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
+#include "bnxt_ethtool.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -610,6 +612,63 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
return rc;
}
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
+
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
+
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto err;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto err;
+ }
+
+ kfree(buf);
+ return true;
+err:
+ kfree(buf);
+ return false;
+}
+
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
+}
+
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -622,6 +681,8 @@ static const struct devlink_ops bnxt_dl_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = bnxt_dl_reload_down,
.reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7191e5d74208..87eb5362ad70 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2176,14 +2176,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
- u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
- u32 dir_item_len, const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
@@ -2836,8 +2836,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
@@ -2871,9 +2871,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a59284215e78..a8ecef8ab82c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -58,5 +58,17 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a1a2c7a64fd5..730febd19330 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -307,7 +307,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ if (min_tx_rate > pf_link_speed) {
netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
min_tx_rate, vf_id);
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f7f10cfb3476..e86503d97f32 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
+ id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
@@ -669,7 +669,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
- kfree(id_tbl->table);
+ bitmap_free(id_tbl->table);
id_tbl->table = NULL;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c28f8cc00d1c..db1e9d810b41 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7944,7 +7944,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f6fe08df568b..29dd0f93d6c0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2823,8 +2823,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
@@ -2872,8 +2871,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 7ca077b65eaa..9c410f93a103 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -717,14 +717,15 @@
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_MIIONRGMII 0x00000200
+#define MACB_CAPS_NEED_TSUCLK 0x00000400
+#define MACB_CAPS_PCS 0x01000000
+#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d89098f4ede8..494fe961a49d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2267,7 +2267,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
@@ -3482,7 +3482,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
spin_lock_irqsave(&bp->rx_fs_lock, flags);
@@ -3535,8 +3536,8 @@ static int gem_del_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc),
- htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
gem_writel_n(bp, SCRT2, fs->location, 0);
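
(The htons() -> be16_to_cpu() change in the two debug prints above is an endianness-annotation fix rather than a behavioural one; a hedged illustration, with example_print_flow_ports() being a made-up helper:)

static void example_print_flow_ports(const struct ethtool_tcpip4_spec *spec)
{
	/* psrc/pdst are __be16; be16_to_cpu() is the correct direction for
	 * printing a host-order value.  The previous htons() happened to
	 * print the same number (both swap on little-endian, both are no-ops
	 * on big-endian), but it converts the wrong way and sparse flags it.
	 */
	pr_debug("ports %u -> %u\n",
		 be16_to_cpu(spec->psrc), be16_to_cpu(spec->pdst));
}
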
@@ -4600,6 +4601,40 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->sgmii_phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->sgmii_phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
static const struct macb_usrio_config sama7g5_usrio = {
.mii = 0,
.rmii = 1,
@@ -4626,8 +4661,8 @@ static const struct macb_config at91sam9260_config = {
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &macb_default_usrio,
@@ -4658,8 +4693,8 @@ static const struct macb_config sama5d29_config = {
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4689,55 +4724,13 @@ static const struct macb_config np4_config = {
.usrio = &macb_default_usrio,
};
-static int zynqmp_init(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct macb *bp = netdev_priv(dev);
- int ret;
-
- if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- /* Ensure PS-GTR PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
-
- if (IS_ERR(bp->sgmii_phy)) {
- ret = PTR_ERR(bp->sgmii_phy);
- dev_err_probe(&pdev->dev, ret,
- "failed to get PS-GTR PHY\n");
- return ret;
- }
-
- ret = phy_init(bp->sgmii_phy);
- if (ret) {
- dev_err(&pdev->dev, "failed to init PS-GTR PHY: %d\n",
- ret);
- return ret;
- }
- }
-
- /* Fully reset GEM controller at hardware level using zynqmp-reset driver,
- * if mapped in device tree.
- */
- ret = device_reset_optional(&pdev->dev);
- if (ret) {
- dev_err_probe(&pdev->dev, ret, "failed to reset controller");
- phy_exit(bp->sgmii_phy);
- return ret;
- }
-
- ret = macb_init(pdev);
- if (ret)
- phy_exit(bp->sgmii_phy);
-
- return ret;
-}
-
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = zynqmp_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4751,6 +4744,17 @@ static const struct macb_config zynq_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
MACB_CAPS_MIIONRGMII,
@@ -4769,6 +4773,16 @@ static const struct macb_config sama7g5_emac_config = {
.usrio = &sama7g5_usrio,
};
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
@@ -4784,11 +4798,15 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
- { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
- { .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
+ { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
+ { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
+ { .compatible = "xlnx,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4796,8 +4814,8 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4913,8 +4931,8 @@ static int macb_probe(struct platform_device *pdev)
/* MTU range: 68 - 1500 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
- if (bp->caps & MACB_CAPS_JUMBO)
- dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
dev->max_mtu = ETH_DATA_LEN;
@@ -5198,7 +5216,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
if (!(device_may_wakeup(dev)))
macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
- else
+ else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
@@ -5214,8 +5232,10 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
+ clk_prepare_enable(bp->tsu_clk);
+ } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
+ clk_prepare_enable(bp->tsu_clk);
}
- clk_prepare_enable(bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 9559c16078f9..e6cb20aaa76a 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -434,7 +434,7 @@ int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
return 0;
}
-static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
u32 reg_val;
@@ -444,8 +444,6 @@ static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
else
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
-
- return 0;
}
int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -468,8 +466,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_OFF:
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (gem_ptp_set_one_step_sync(bp, 1) != 0)
- return -ERANGE;
+ gem_ptp_set_one_step_sync(bp, 1);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 4367edbdd579..06397cc8bb36 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1261,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
@@ -1382,7 +1382,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4a872f328fea..7d5204834ee2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -85,7 +85,7 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
if (err) {
dev_err(adap->pdev_dev,
- "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
dcb_ver_array[dcb->dcb_version], app.selector,
app.protocol, -err);
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 7d49fd4edc9e..14e0d989c3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3429,18 +3429,18 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
unsigned long *t;
struct adapter *adap = filp->private_data;
- t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ t = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!t)
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kfree(t);
+ bitmap_free(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kfree(t);
+ bitmap_free(t);
return count;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 6c790af92170..77897edd2bc0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -2227,7 +2227,7 @@ void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
if (eth_filter_info) {
for (i = 0; i < adap->params.nports; i++) {
kvfree(eth_filter_info[i].loc_array);
- kfree(eth_filter_info[i].bmap);
+ bitmap_free(eth_filter_info[i].bmap);
}
kfree(eth_filter_info);
}
@@ -2270,9 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
goto free_eth_finfo;
}
- eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
- sizeof(unsigned long),
- GFP_KERNEL);
+ eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
goto free_eth_finfo;
@@ -2284,7 +2282,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
free_eth_finfo:
while (i-- > 0) {
- kfree(eth_filter->port[i].bmap);
+ bitmap_free(eth_filter->port[i].bmap);
kvfree(eth_filter->port[i].loc_array);
}
kfree(eth_filter_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0c78c0db8937..d0061921529f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5047,28 +5047,24 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the various egress queue bitmaps
 * i.e. starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
}
- bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -5417,10 +5413,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5854,8 +5850,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5870,7 +5865,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -6189,10 +6184,10 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
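The cxgb4 conversions above swap hand-rolled bitmap allocations for the bitmap API. A short sketch of the equivalence, using placeholder names (nbits, map, example_bitmap_use) that are not taken from this patch:

static int example_bitmap_use(unsigned int nbits)
{
	/* bitmap_zalloc(nbits, GFP_KERNEL) requests the same zeroed storage as
	 * kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), GFP_KERNEL);
	 * it states the intent and pairs with bitmap_free() instead of kfree().
	 */
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;

	set_bit(0, map);	/* use like any other bitmap */
	bitmap_free(map);
	return 0;
}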
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f889f404305c..ee52e3b1d74f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1531,7 +1531,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 7de3800437c9..c2822e635f89 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2859,7 +2859,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* address stored on the adapter
* @adapter: The adapter
*
- * Find the the port mask for the VF based on the index of mac
+ * Find the port mask for the VF based on the index of mac
* address stored in the adapter. If no mac address is stored on
* the adapter for the VF, use the port mask received from the
* firmware.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d546993bda09..1c52592d3b65 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -877,7 +877,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
- * separately with the Padding Boundary in SGE_CONTROL and and Packing
+ * separately with the Padding Boundary in SGE_CONTROL and Packing
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
* SGE_CONTROL in order to determine how ingress packet data will be
* laid out in Packed Buffer Mode. Unfortunately, older versions of
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 60b648b46f75..bfee0e4e54b1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1012,7 +1012,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1907,7 +1907,7 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
skb_tx_timestamp(nskb);
@@ -1938,7 +1938,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 1c81b161de52..372fb7b3a282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -680,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cd4e243da5fa..414362febbb9 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -737,9 +737,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -3178,7 +3178,7 @@ static irqreturn_t be_intx(int irq, void *dev)
}
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
- /* Return IRQ_HANDLED only for the the first spurious intr
+ /* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
* this irq as a bad one!
*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a90275143d87..e8e2aa1e7f01 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -691,7 +691,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 5ff2634bee2f..cb419aef8d1b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -201,7 +201,7 @@ void fs_enet_platform_cleanup(void);
/* access macros */
#if defined(CONFIG_CPM1)
-/* for a a CPM1 __raw_xxx's are sufficient */
+/* for a CPM1 __raw_xxx's are sufficient */
#define __cbd_out32(addr, x) __raw_writel(x, addr)
#define __cbd_out16(addr, x) __raw_writew(x, addr)
#define __cbd_in32(addr) __raw_readl(addr)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3dc9369a33f7..e7bf1524b68e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1944,6 +1944,7 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
+ skb_tx_timestamp(skb);
netdev_tx_sent_queue(txq, bytes_sent);
gfar_wmb();
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 9a2c16d69e2c..81fb68730138 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1457,6 +1457,7 @@ static int gfar_get_ts_info(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
@@ -1474,7 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
index 257203e94b68..f21819670106 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_hci.h
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -442,6 +442,7 @@ enum fun_port_lane_attr {
};
enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_XCVR_READ = 0x23,
FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
};
@@ -595,6 +596,19 @@ struct fun_admin_port_req {
struct fun_admin_read48_req read48[];
} read;
+ struct fun_admin_port_xcvr_read_req {
+ u8 subop;
+ u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+ } xcvr_read;
struct fun_admin_port_inetaddr_event_req {
__u8 subop;
__u8 rsvd0;
@@ -625,6 +639,15 @@ struct fun_admin_port_req {
.id = cpu_to_be32(_id), \
}
+#define FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(_flags, _id, _bank, _page, \
+ _offset, _length, _dev_addr) \
+ ((struct fun_admin_port_xcvr_read_req) { \
+ .subop = FUN_ADMIN_PORT_SUBOP_XCVR_READ, \
+ .flags = cpu_to_be16(_flags), .id = cpu_to_be32(_id), \
+ .bank = (_bank), .page = (_page), .offset = (_offset), \
+ .length = (_length), .dev_addr = (_dev_addr), \
+ })
+
struct fun_admin_port_rsp {
struct fun_admin_rsp_common common;
@@ -659,6 +682,23 @@ struct fun_admin_port_rsp {
} u;
};
+struct fun_admin_port_xcvr_read_rsp {
+ struct fun_admin_rsp_common common;
+
+ u8 subop;
+ u8 rsvd0[3];
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+
+ u8 data[128];
+};
+
enum fun_xcvr_type {
FUN_XCVR_BASET = 0x0,
FUN_XCVR_CU = 0x1,
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index d081168c95fa..31aa185f4d17 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -78,6 +78,7 @@ static const char * const txq_stat_names[] = {
"tx_cso",
"tx_tso",
"tx_encapsulated_tso",
+ "tx_uso",
"tx_more",
"tx_queue_stops",
"tx_queue_restarts",
@@ -778,6 +779,7 @@ static void fun_get_ethtool_stats(struct net_device *netdev,
ADD_STAT(txs.tx_cso);
ADD_STAT(txs.tx_tso);
ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_uso);
ADD_STAT(txs.tx_more);
ADD_STAT(txs.tx_nstops);
ADD_STAT(txs.tx_nrestarts);
@@ -1116,6 +1118,39 @@ static int fun_set_fecparam(struct net_device *netdev,
return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
}
+static int fun_get_port_module_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *req,
+ struct netlink_ext_ack *extack)
+{
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_xcvr_read_rsp rsp;
+ } cmd;
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified port is virtual, only physical ports have modules");
+ return -EOPNOTSUPP;
+ }
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.xcvr_read =
+ FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
+ req->bank, req->page,
+ req->offset, req->length,
+ req->i2c_address);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ memcpy(req->data, cmd.rsp.data, req->length);
+ return req->length;
+}
+
static const struct ethtool_ops fun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1154,6 +1189,7 @@ static const struct ethtool_ops fun_ethtool_ops = {
.get_eth_mac_stats = fun_get_802_3_stats,
.get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
.get_rmon_stats = fun_get_rmon_stats,
+ .get_module_eeprom_by_page = fun_get_port_module_page,
};
void fun_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index 9485cf699c5d..f247b7ad3a88 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1357,7 +1357,8 @@ static const struct net_device_ops fun_netdev_ops = {
#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
index 2f6698b98b03..706d81e39a54 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -16,23 +16,24 @@
#define FUN_XDP_CLEAN_BATCH 16
/* DMA-map a packet and return the (length, DMA_address) pairs for its
- * segments. If a mapping error occurs -ENOMEM is returned.
+ * segments. If a mapping error occurs -ENOMEM is returned. The packet
+ * consists of an skb_shared_info and one additional address/length pair.
*/
-static int map_skb(const struct sk_buff *skb, struct device *dev,
- dma_addr_t *addr, unsigned int *len)
+static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
+ void *data, unsigned int data_len,
+ dma_addr_t *addr, unsigned int *len)
{
- const struct skb_shared_info *si;
const skb_frag_t *fp, *end;
- *len = skb_headlen(skb);
- *addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
+ *len = data_len;
+ *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
return -ENOMEM;
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
+ if (!si)
+ return 0;
- for (fp = si->frags; fp < end; fp++) {
+ for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
*++len = skb_frag_size(fp);
*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
@@ -44,7 +45,7 @@ unwind:
while (fp-- > si->frags)
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
- dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
return -ENOMEM;
}
@@ -71,6 +72,33 @@ static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}
+/* Write a gather list to the Tx descriptor at @req from @ngle address/length
+ * pairs.
+ */
+static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
+ struct fun_eth_tx_req *req,
+ const dma_addr_t *addrs,
+ const unsigned int *lens,
+ unsigned int ngle)
+{
+ struct fun_dataop_gl *gle;
+ unsigned int i;
+
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ return gle;
+}
+
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
return *(__be16 *)&tcp_flag_word(th);
@@ -83,7 +111,7 @@ static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
const struct fun_ktls_tx_ctx *tls_ctx;
u32 datalen, seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return skb;
@@ -129,10 +157,13 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
struct fun_eth_tx_req *req;
struct fun_dataop_gl *gle;
const struct tcphdr *th;
- unsigned int ngle, i;
+ unsigned int l4_hlen;
+ unsigned int ngle;
u16 flags;
- if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
+ shinfo = skb_shinfo(skb);
+ if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
+ skb_headlen(skb), addrs, lens))) {
FUN_QSTAT_INC(q, tx_map_err);
return 0;
}
@@ -145,7 +176,6 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
req->repr_idn = 0;
req->encap_proto = 0;
- shinfo = skb_shinfo(skb);
if (likely(shinfo->gso_size)) {
if (skb->encapsulation) {
u16 ol4_ofst;
@@ -178,6 +208,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
FUN_ETH_UPDATE_INNER_L3_LEN;
}
th = inner_tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
fun_eth_offload_init(&req->offload, flags,
shinfo->gso_size,
tcp_hdr_doff_flags(th), 0,
@@ -185,6 +216,24 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
skb_inner_transport_offset(skb),
skb_network_offset(skb), ol4_ofst);
FUN_QSTAT_INC(q, tx_encap_tso);
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+ flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L4_LEN |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_INNER_IPV6;
+
+ l4_hlen = sizeof(struct udphdr);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ cpu_to_be16(l4_hlen << 10), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_uso);
} else {
/* HW considers one set of headers as inner */
flags = FUN_ETH_INNER_LSO |
@@ -195,6 +244,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
else
flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
th = tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
fun_eth_offload_init(&req->offload, flags,
shinfo->gso_size,
tcp_hdr_doff_flags(th), 0,
@@ -209,7 +259,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
extra_pkts = shinfo->gso_segs - 1;
extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
- __tcp_hdrlen(th)) * extra_pkts;
+ l4_hlen) * extra_pkts;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
if (skb->csum_offset == offsetof(struct udphdr, check))
@@ -222,18 +272,9 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
}
ngle = shinfo->nr_frags + 1;
- req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
- for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
- i < ngle && txq_to_end(q, gle); i++, gle++)
- fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
-
- if (txq_to_end(q, gle) == 0) {
- gle = (struct fun_dataop_gl *)q->desc;
- for ( ; i < ngle; i++, gle++)
- fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
- }
+ gle = fun_write_gl(q, req, addrs, lens, ngle);
if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
@@ -350,7 +391,7 @@ static u16 txq_hw_head(const struct funeth_txq *q)
/* Unmap the Tx packet starting at the given descriptor index and
* return the number of Tx descriptors it occupied.
*/
-static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
+static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
unsigned int ngle = req->dataop.ngather;
@@ -398,7 +439,7 @@ static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
rmb();
do {
- unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
struct sk_buff *skb = q->info[reclaim_idx].skb;
trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
@@ -440,20 +481,10 @@ int fun_txq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
-{
- const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
- const struct fun_dataop_gl *gle;
-
- gle = (const struct fun_dataop_gl *)req->dataop.imm;
- dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
- be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
-}
-
-/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
+/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
- unsigned int npkts = 0, head, reclaim_idx;
+ unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
@@ -465,38 +496,49 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
rmb();
do {
- fun_xdp_unmap(q, reclaim_idx);
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
+
xdp_return_frame(q->info[reclaim_idx].xdpf);
- trace_funeth_tx_free(q, reclaim_idx, 1, head);
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
- reclaim_idx = (reclaim_idx + 1) & q->mask;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ ndesc += pkt_desc;
npkts++;
} while (reclaim_idx != head && npkts < budget);
}
- q->cons_cnt += npkts;
+ q->cons_cnt += ndesc;
return npkts;
}
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
+ unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
+ const struct skb_shared_info *si = NULL;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma[MAX_SKB_FRAGS + 1];
struct fun_eth_tx_req *req;
- struct fun_dataop_gl *gle;
- unsigned int idx, len;
- dma_addr_t dma;
if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
- if (!unlikely(fun_txq_avail(q))) {
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ si = xdp_get_shared_info_from_frame(xdpf);
+ tot_len = xdp_get_frame_len(xdpf);
+ nfrags += si->nr_frags;
+ ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
+ sizeof(struct fun_dataop_gl)),
+ FUNETH_SQE_SIZE);
+ }
+
+ if (unlikely(fun_txq_avail(q) < ndesc)) {
FUN_QSTAT_INC(q, tx_xdp_full);
return false;
}
- len = xdpf->len;
- dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
+ if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
+ lens))) {
FUN_QSTAT_INC(q, tx_map_err);
return false;
}
@@ -504,26 +546,25 @@ bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
idx = q->prod_cnt & q->mask;
req = fun_tx_desc_addr(q, idx);
req->op = FUN_ETH_OP_TX;
- req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
+ req->len8 = 0;
req->flags = 0;
req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
req->repr_idn = 0;
req->encap_proto = 0;
fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
- req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);
- gle = (struct fun_dataop_gl *)req->dataop.imm;
- fun_dataop_gl_init(gle, 0, 0, len, dma);
+ fun_write_gl(q, req, dma, lens, nfrags);
q->info[idx].xdpf = xdpf;
u64_stats_update_begin(&q->syncp);
- q->stats.tx_bytes += len;
+ q->stats.tx_bytes += tot_len;
q->stats.tx_pkts++;
u64_stats_update_end(&q->syncp);
- trace_funeth_tx(q, len, idx, 1);
- q->prod_cnt++;
+ trace_funeth_tx(q, tot_len, idx, nfrags);
+ q->prod_cnt += ndesc;
return true;
}
@@ -563,7 +604,7 @@ static void fun_txq_purge(struct funeth_txq *q)
while (q->cons_cnt != q->prod_cnt) {
unsigned int idx = q->cons_cnt & q->mask;
- q->cons_cnt += unmap_skb(q, idx);
+ q->cons_cnt += fun_unmap_pkt(q, idx);
dev_kfree_skb_any(q->info[idx].skb);
}
netdev_tx_reset_queue(q->ndq);
@@ -574,9 +615,8 @@ static void fun_xdpq_purge(struct funeth_txq *q)
while (q->cons_cnt != q->prod_cnt) {
unsigned int idx = q->cons_cnt & q->mask;
- fun_xdp_unmap(q, idx);
+ q->cons_cnt += fun_unmap_pkt(q, idx);
xdp_return_frame(q->info[idx].xdpf);
- q->cons_cnt++;
}
}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 8708e2895946..53b7e95213a8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -82,6 +82,7 @@ struct funeth_txq_stats { /* per Tx queue SW counters */
u64 tx_cso; /* # of packets with checksum offload */
u64 tx_tso; /* # of non-encapsulated TSO super-packets */
u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_uso; /* # of non-encapsulated UDP LSO super-packets */
u64 tx_more; /* # of DBs elided due to xmit_more */
u64 tx_nstops; /* # of times the queue has stopped */
u64 tx_nrestarts; /* # of times the queue has restarted */
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index ec394d991668..588d64819ed5 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -386,7 +386,7 @@ static int gve_prep_tso(struct sk_buff *skb)
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
- header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
@@ -598,9 +598,9 @@ static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
*/
static bool gve_can_send_tso(const struct sk_buff *skb)
{
- const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
int cur_seg_size;
@@ -795,7 +795,7 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
/* No outstanding miss completion but packet allocated
* implies packet receives a re-injection completion
- * without a a prior miss completion. Return without
+ * without a prior miss completion. Return without
* completing the packet.
*/
net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2f0bd21a9082..d94cc8c6681f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -31,8 +31,6 @@
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
- (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
@@ -94,7 +92,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -108,7 +106,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
}
desc->tx.ip_offset = ip_offset;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ae56306400b8..35d70041b9e8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1838,9 +1838,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
if (!skb->encapsulation)
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_tcp_all_headers(skb);
- return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
}
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 5153e5d41bbd..b8a1ecb4b8fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -37,8 +37,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
__entry->hdr_len = skb->encapsulation ?
- skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
- skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
__entry->ip_summed = skb->ip_summed;
__entry->fraglist = skb_has_frag_list(skb);
hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5eaf09ea4009..26f87330173e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -979,7 +979,7 @@ static int hclgevf_update_mac_list(struct hnae3_handle *handle,
/* if the mac addr is already in the mac list, no need to add a new
* one into it, just check the mac addr state, convert it to a new
- * new state, or just remove it, or do nothing.
+ * state, or just remove it, or do nothing.
*/
mac_node = hclgevf_find_mac_node(list, addr);
if (mac_node) {
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 07fdab58001d..c2ae1b4f9a5f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -174,7 +174,7 @@ static int hns_mdio_wait_ready(struct mii_bus *bus)
u32 cmd_reg_value;
int i;
- /* waitting for MDIO_COMMAND_REG 's mdio_start==0 */
+ /* waiting for MDIO_COMMAND_REG's mdio_start==0 */
/* after that can do read or write*/
for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
@@ -319,7 +319,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
MDIO_C45_READ, phy_id, devad);
}
- /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+ /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
/* check for read or write opt is finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index fb3e89141a0d..a4fbf44f944c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -95,9 +95,6 @@ struct hinic_dev {
u16 sq_depth;
u16 rq_depth;
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-
u8 rss_tmpl_idx;
u8 rss_hash_engine;
u16 num_rss;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 05329292d940..c23ee2ddbce3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,8 +62,6 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_LRO_RX_TIMER_DEFAULT 16
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
-
#define work_to_rx_mode_work(work) \
container_of(work, struct hinic_rx_mode_work, work)
@@ -82,56 +80,44 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features, bool force_change);
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
struct hinic_rxq_stats rx_stats;
- u64_stats_init(&rx_stats.syncp);
-
hinic_rxq_get_stats(rxq, &rx_stats);
- u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
nic_rx_stats->errors += rx_stats.errors;
nic_rx_stats->csum_errors += rx_stats.csum_errors;
nic_rx_stats->other_errors += rx_stats.other_errors;
- u64_stats_update_end(&nic_rx_stats->syncp);
-
- hinic_rxq_clean_stats(rxq);
}
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
struct hinic_txq_stats tx_stats;
- u64_stats_init(&tx_stats.syncp);
-
hinic_txq_get_stats(txq, &tx_stats);
- u64_stats_update_begin(&nic_tx_stats->syncp);
nic_tx_stats->bytes += tx_stats.bytes;
nic_tx_stats->pkts += tx_stats.pkts;
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
- u64_stats_update_end(&nic_tx_stats->syncp);
-
- hinic_txq_clean_stats(txq);
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
{
int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
}
/**
@@ -560,8 +546,6 @@ int hinic_close(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- update_nic_stats(nic_dev);
-
up(&nic_dev->mgmt_lock);
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
@@ -855,26 +839,19 @@ static void hinic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
-
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
-
- down(&nic_dev->mgmt_lock);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
-
- up(&nic_dev->mgmt_lock);
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
- stats->rx_errors = nic_rx_stats->errors;
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
}
static int hinic_set_features(struct net_device *netdev,
@@ -1173,8 +1150,6 @@ static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
static int nic_dev_init(struct pci_dev *pdev)
{
struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -1236,15 +1211,8 @@ static int nic_dev_init(struct pci_dev *pdev)
sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
-
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
-
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
goto err_vlan_bitmap;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 24b7b819dbfb..a866bea65110 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -73,7 +73,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
@@ -83,7 +82,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index 01e7d3c0b68e..df555847afb5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -852,12 +852,6 @@ int hinic_ndo_set_vf_bw(struct net_device *netdev,
return -EINVAL;
}
- if (max_tx_rate < min_tx_rate) {
- netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
- max_tx_rate, min_tx_rate);
- return -EINVAL;
- }
-
err = hinic_port_link_state(nic_dev, &link_state);
if (err) {
netif_err(nic_dev, drv, netdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 87408e7bb809..5051cdff2384 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -98,7 +98,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
@@ -108,7 +107,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 8ce3348edf08..5dc302880f5f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1617,7 +1617,7 @@ static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
* For TSO packets we only copy the headers into the
* immediate area.
*/
- immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ immediate_len = skb_tcp_all_headers(skb);
}
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 36418b510dde..11a884aa5082 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1430,7 +1430,6 @@ static int e100_phy_check_without_mii(struct nic *nic)
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
#define NSC_CONG_TXREADY 0x0400
-#define ADVERTISE_FC_SUPPORTED 0x0400
static int e100_phy_init(struct nic *nic)
{
struct net_device *netdev = nic->netdev;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 1042e79a1397..4542e2bc28e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -2000,7 +2000,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
@@ -4376,7 +4376,7 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
/**
* e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
* @hw: Struct containing variables accessed by shared code
- * @offset: Offset in VLAN filer table to write
+ * @offset: Offset in VLAN filter table to write
* @value: Value to write into VLAN filter table
*/
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
@@ -4396,7 +4396,7 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
- * e1000_clear_vfta - Clears the VLAN filer table
+ * e1000_clear_vfta - Clears the VLAN filter table
* @hw: Struct containing variables accessed by shared code
*/
static void e1000_clear_vfta(struct e1000_hw *hw)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f5feb55cfba..23299fc56199 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2708,7 +2708,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -3139,7 +3139,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
case e1000_82544: {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 4d4f5bf1e516..f4154ca7fcb4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -82,7 +82,6 @@ E1000_PARAM(Duplex, "Duplex setting");
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -95,7 +94,6 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 8d06c9d8ff8b..e8a9a9610ac6 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -329,7 +329,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
- s32 ptp_delta;
+ long ptp_delta;
u16 eee_advert;
};
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 51512a73fdd0..5df7ad93f3d7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -957,7 +957,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f1729940e46c..321f2a95ae3a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3922,9 +3922,9 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return;
- if (info->adjfreq) {
+ if (info->adjfine) {
/* restore the previous ptp frequency delta */
- ret_val = info->adjfreq(info, adapter->ptp_delta);
+ ret_val = info->adjfine(info, adapter->ptp_delta);
} else {
/* set the default base frequency if no adjustment possible */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
@@ -5474,7 +5474,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -5846,7 +5846,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
/* we do this workaround for ES2LAN, but it is un-necessary,
* avoiding it could save a lot of cycles
*/
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ebe121db4307..3132d8f2f207 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -101,8 +101,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
* demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
-#define MAX_INTMODE 2
-#define MIN_INTMODE 0
/* Enable Smart Power Down of the PHY
*
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eb5c014c02fb..0e488e4fa5c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -15,14 +15,16 @@
#endif
/**
- * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * e1000e_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
- * @delta: Desired frequency change in parts per billion
+ * @delta: Desired frequency change in scaled parts per million
*
* Adjust the frequency of the PHC cycle counter by the indicated delta from
* the base frequency.
+ *
+ * Scaled parts per million is ppm but with a 16-bit binary fractional field.
**/
-static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
@@ -33,9 +35,6 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
u32 timinca, incvalue;
s32 ret_val;
- if ((delta > ptp->max_adj) || (delta <= -1000000000))
- return -EINVAL;
-
if (delta < 0) {
neg_adj = true;
delta = -delta;
@@ -50,9 +49,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
- adjustment = incvalue;
- adjustment *= delta;
- adjustment = div_u64(adjustment, 1000000000);
+ adjustment = mul_u64_u64_div_u64(incvalue, (u64)delta,
+ 1000000ULL << 16);
incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
@@ -260,7 +258,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = e1000e_phc_adjfreq,
+ .adjfine = e1000e_phc_adjfine,
.adjtime = e1000e_phc_adjtime,
.gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
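With .adjfine, the adjustment arrives as scaled parts per million (ppm with a 16-bit binary fraction), so the increment delta becomes incvalue * scaled_ppm / (1000000 << 16) rather than the old parts-per-billion math. A small userspace sketch of that arithmetic; the increment value is a made-up example, not a real e1000e TIMINCA constant:

#include <stdio.h>
#include <stdint.h>

/* Mirror of mul_u64_u64_div_u64(incvalue, delta, 1000000ULL << 16): the
 * 128-bit intermediate keeps the multiply from overflowing 64 bits.
 * (unsigned __int128 is a GCC/Clang extension.)
 */
static uint64_t scaled_ppm_to_delta(uint64_t incvalue, uint64_t scaled_ppm)
{
	unsigned __int128 prod = (unsigned __int128)incvalue * scaled_ppm;

	return (uint64_t)(prod / (1000000ULL << 16));
}

int main(void)
{
	uint64_t incvalue = 0x600000;		/* assumed example increment */
	uint64_t scaled_ppm = 100 << 16;	/* request a +100 ppm adjustment */

	/* 100 ppm of 6291456 is 629 after integer truncation. */
	printf("delta = %llu\n",
	       (unsigned long long)scaled_ppm_to_delta(incvalue, scaled_ppm));
	return 0;
}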
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 30ca9ee1900b..87fa5874f16e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -809,7 +809,7 @@ static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function copies the message from the the message array to mbmem
+ * This function copies the message from the message array to mbmem
**/
static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
@@ -1825,7 +1825,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
fm10k_sm_mbx_connect_reset(mbx);
break;
case FM10K_STATE_CONNECT:
- /* try connnecting at lower version */
+ /* try connecting at lower version */
if (mbx->remote) {
while (mbx->local > 1)
mbx->local--;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index f6d56867f857..75cbdf2dbbe3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -78,7 +78,7 @@ static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
* @string: Pointer to location of destination string
*
* This function pulls the string back out of the attribute and will place
- * it in the array pointed by by string. It will return success if provided
+ * it in the array pointed by string. It will return success if provided
 * with valid pointers.
**/
static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
@@ -584,7 +584,7 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* @mbx: Unused mailbox pointer
*
* This function is a default handler for unrecognized messages. At a
- * a minimum it just indicates that the message requested was
+ * minimum it just indicates that the message requested was
* unimplemented.
**/
s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 407fe8f340a0..d86b6d349ea9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -566,6 +566,7 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -1291,4 +1292,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add);
+
+/**
+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
+ * @pf: pointer to the PF structure
+ *
+ * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ *
+ * Return: I40E_FLAG_TC_MQPRIO set state.
+ **/
+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+{
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
+}
+
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 19704f5c8291..156e92c43780 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -236,8 +236,6 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
-#define I40E_QUEUE_STAT(_name, _stat) \
- I40E_STAT(struct i40e_ring, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -457,6 +455,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
+ I40E_PRIV_FLAG("vf-vlan-pruning",
+ I40E_FLAG_VF_VLAN_PRUNING, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1141,6 +1141,71 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#define I40E_LBIT_SIZE 8
+/**
+ * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
+ * @speed: speed in decimal
+ * @ks: ethtool ksettings
+ *
+ * Return i40e_aq_link_speed based on speed
+ **/
+static enum i40e_aq_link_speed
+i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks)
+{
+ enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN;
+ bool speed_changed = false;
+ int i, j;
+
+ static const struct {
+ __u32 speed;
+ enum i40e_aq_link_speed link_speed;
+ __u8 bit[I40E_LBIT_SIZE];
+ } i40e_speed_lut[] = {
+#define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT
+ {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} },
+ {SPEED_1000, I40E_LINK_SPEED_1GB,
+ {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX),
+ I40E_LBIT(1000baseKX)} },
+ {SPEED_10000, I40E_LINK_SPEED_10GB,
+ {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR),
+ I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR),
+ I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} },
+
+ {SPEED_25000, I40E_LINK_SPEED_25GB,
+ {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR),
+ I40E_LBIT(25000baseSR)} },
+ {SPEED_40000, I40E_LINK_SPEED_40GB,
+ {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4),
+ I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} },
+ {SPEED_20000, I40E_LINK_SPEED_20GB,
+ {I40E_LBIT(20000baseKR2)} },
+ {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} },
+ {SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(5000baseT)} }
+#undef I40E_LBIT
+};
+
+ for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) {
+ if (i40e_speed_lut[i].speed == speed) {
+ for (j = 0; j < I40E_LBIT_SIZE; j++) {
+ if (test_bit(i40e_speed_lut[i].bit[j],
+ ks->link_modes.supported)) {
+ speed_changed = true;
+ break;
+ }
+ if (!i40e_speed_lut[i].bit[j])
+ break;
+ }
+ if (speed_changed) {
+ link_speed = i40e_speed_lut[i].link_speed;
+ break;
+ }
+ }
+ }
+ return link_speed;
+}
+
+#undef I40E_LBIT_SIZE
+
/**
* i40e_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
@@ -1157,12 +1222,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings copy_ks;
struct i40e_aq_set_phy_config config;
struct i40e_pf *pf = np->vsi->back;
+ enum i40e_aq_link_speed link_speed;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
i40e_status status = 0;
int timeout = 50;
int err = 0;
+ __u32 speed;
u8 autoneg;
/* Changing port settings is not supported if this isn't the
@@ -1195,6 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
+ speed = copy_ks.base.speed;
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
@@ -1213,6 +1281,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* set autoneg back to what it currently is */
copy_ks.base.autoneg = safe_ks.base.autoneg;
+ copy_ks.base.speed = safe_ks.base.speed;
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
@@ -1329,6 +1398,27 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
40000baseLR4_Full))
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* Autonegotiation must be disabled to change speed */
+ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) &&
+ (autoneg == AUTONEG_DISABLE ||
+ (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) {
+ link_speed = i40e_speed_to_link_speed(speed, ks);
+ if (link_speed == I40E_LINK_SPEED_UNKNOWN) {
+ netdev_info(netdev, "Given speed is not supported\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ } else {
+ config.link_speed = link_speed;
+ }
+ } else {
+ if (safe_ks.base.speed != speed) {
+ netdev_info(netdev,
+ "Unable to set speed, disable autoneg\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ }
+
/* If speed didn't get set, set it to what it currently is.
* This is needed because if advertise is 0 (as it is when autoneg
* is disabled) then speed won't get set.
@@ -4931,7 +5021,7 @@ static int i40e_set_channels(struct net_device *dev,
/* We do not support setting channels via ethtool when TCs are
* configured through mqprio
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return -EINVAL;
/* verify they are not requesting separate vectors */
@@ -5294,6 +5384,13 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ pf->num_alloc_vfs) {
+ dev_warn(&pf->pdev->dev,
+ "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(new_flags & I40E_FLAG_MFP_ENABLED))
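The forced-speed path added above only accepts a decimal speed if at least one supported link-mode bit maps to it; otherwise it returns -EOPNOTSUPP. A standalone sketch of the same table walk, with made-up mode bits standing in for the real ETHTOOL_LINK_MODE_* values:

#include <stdio.h>
#include <stdint.h>

/* Made-up link-mode bit numbers, for illustration only. */
enum { MODE_100T = 1, MODE_1000T = 2, MODE_10000T = 3 };

struct speed_entry {
	unsigned int speed;	/* decimal speed from ethtool */
	const char *link_speed;	/* stand-in for the AQ link-speed enum */
	uint8_t bits[4];	/* candidate link-mode bits, 0-terminated */
};

static const struct speed_entry lut[] = {
	{ 100,   "100MB", { MODE_100T } },
	{ 1000,  "1GB",   { MODE_1000T } },
	{ 10000, "10GB",  { MODE_10000T } },
};

/* Accept the speed only if one of its candidate modes is supported. */
static const char *speed_to_link_speed(unsigned int speed, uint32_t supported)
{
	unsigned int i, j;

	for (i = 0; i < sizeof(lut) / sizeof(lut[0]); i++) {
		if (lut[i].speed != speed)
			continue;
		for (j = 0; j < 4 && lut[i].bits[j]; j++)
			if (supported & (1u << lut[i].bits[j]))
				return lut[i].link_speed;
	}
	return "UNKNOWN";
}

int main(void)
{
	uint32_t supported = (1u << MODE_1000T) | (1u << MODE_10000T);

	printf("1000  -> %s\n", speed_to_link_speed(1000, supported));
	printf("100   -> %s\n", speed_to_link_speed(100, supported));
	return 0;
}

The 0-terminated bit array mirrors how the driver's lookup stops at the first unset slot in its per-speed bit list.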
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 685556e968f2..b36bf9c3e1e4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1442,6 +1442,114 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
}
/**
+ * i40e_get_vf_new_vlan - Get new VLAN ID on a VF
+ * @vsi: the vsi to configure
+ * @new_mac: new mac filter to be added
+ * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Get new VLAN ID based on current VLAN filters, trust, PVID
+ * and the vf-vlan-pruning flag.
+ *
+ * Returns the value of the new VLAN filter or
+ * the old value if no new filter is needed.
+ */
+static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
+ struct i40e_new_mac_filter *new_mac,
+ struct i40e_mac_filter *f,
+ int vlan_filters,
+ bool trusted)
+{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
+ struct i40e_pf *pf = vsi->back;
+ bool is_any;
+
+ if (new_mac)
+ f = new_mac->f;
+
+ if (pvid && f->vlan != pvid)
+ return pvid;
+
+ is_any = (trusted ||
+ !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+
+ if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (is_any && !vlan_filters && f->vlan == 0)) {
+ if (is_any)
+ return I40E_VLAN_ANY;
+ else
+ return 0;
+ }
+
+ return f->vlan;
+}
+
+/**
+ * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Correct VF VLAN filters based on current VLAN filters, trust, PVID
+ * and the vf-vlan-pruning flag.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters,
+ bool trusted)
+{
+ struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new_mac;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
+ new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
+ vlan_filters, trusted);
+ }
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
+ trusted);
+ if (new_vlan != f->vlan) {
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+ /* Create a temporary i40e_new_mac_filter */
+ new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
+ if (!new_mac)
+ return -ENOMEM;
+ new_mac->f = add_head;
+ new_mac->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new_mac->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+ return 0;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
@@ -2500,10 +2608,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vlan_filters++;
}
- retval = i40e_correct_mac_vlan_filters(vsi,
- &tmp_add_list,
- &tmp_del_list,
- vlan_filters);
+ if (vsi->type != I40E_VSI_SRIOV)
+ retval = i40e_correct_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters);
+ else
+ retval = i40e_correct_vf_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters, pf->vf[vsi->vf_id].trusted);
hlist_for_each_entry(new, &tmp_add_list, hlist)
netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
@@ -2932,8 +3044,21 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
int bkt;
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
- if (f->state == I40E_FILTER_REMOVE)
+ /* If we're asked to add a filter that has been marked for
+ * removal, it is safe to simply restore it to active state.
+ * __i40e_del_filter will have simply deleted any filters which
+ * were previously marked NEW or FAILED, so if it is currently
+ * marked REMOVE it must have previously been ACTIVE. Since we
+ * haven't yet run the sync filters task, just restore this
+ * filter to the ACTIVE state so that the sync task leaves it
+ * in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
+ f->state = I40E_FILTER_ACTIVE;
+ continue;
+ } else if (f->state == I40E_FILTER_REMOVE) {
continue;
+ }
add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
@@ -4114,7 +4239,6 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
i40e_flush(&pf->hw);
if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4853,7 +4977,6 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
irq_update_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
@@ -5337,7 +5460,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
@@ -5369,7 +5492,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return i40e_mqprio_get_enabled_tc(pf);
/* If neither MQPRIO nor DCB is enabled for this PF then just return
@@ -5466,7 +5589,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
int i;
/* There is no need to reset BW when mqprio mode is on. */
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return 0;
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
@@ -5538,7 +5661,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
vsi->tc_config.tc_info[i].qoffset);
}
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return;
/* Assign UP2TC map for the VSI */
@@ -5699,7 +5822,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.info = vsi->info;
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
if (ret)
goto out;
@@ -6423,7 +6546,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
if (vsi->type == I40E_VSI_MAIN) {
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
else
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
@@ -7817,7 +7940,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
return ERR_PTR(-EINVAL);
}
@@ -8070,7 +8193,7 @@ config_tc:
/* Quiesce VSI queues */
i40e_quiesce_vsi(vsi);
- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
+ if (!hw && !i40e_is_tc_mqprio_enabled(pf))
i40e_remove_queue_channels(vsi);
/* Configure VSI for enabled TCs */
@@ -8094,7 +8217,7 @@ config_tc:
"Setup channel (id:%u) utilizing num_queues %d\n",
vsi->seid, vsi->tc_config.tc_info[0].qcount);
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
@@ -10748,7 +10871,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
* is not supported with new link speed
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
i40e_aq_set_dcb_parameters(hw, false, NULL);
} else {
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
@@ -13107,7 +13230,7 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
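The VF filter correction above boils down to a per-filter VLAN decision: a configured port VLAN always wins, and otherwise untagged and "any VLAN" filters are normalized according to whether the VF is trusted or VLAN pruning is disabled. A standalone sketch of that decision, using -1 as a stand-in for the driver's "match any VLAN" marker:

#include <stdio.h>
#include <stdbool.h>

#define VLAN_ANY (-1)	/* stand-in for the "match any VLAN" filter value */

/* Decide which VLAN a MAC filter should carry:
 *  - a port VLAN (pvid) overrides everything;
 *  - a trusted VF (or pruning disabled) keeps or gains VLAN_ANY when no
 *    explicit VLAN filters exist;
 *  - an untrusted VF with pruning enabled falls back to untagged (0).
 */
static int vf_new_vlan(int cur_vlan, int pvid, int vlan_filters,
		       bool trusted, bool pruning)
{
	bool is_any = trusted || !pruning;

	if (pvid && cur_vlan != pvid)
		return pvid;

	if ((vlan_filters && cur_vlan == VLAN_ANY) ||
	    (!is_any && !vlan_filters && cur_vlan == VLAN_ANY) ||
	    (is_any && !vlan_filters && cur_vlan == 0))
		return is_any ? VLAN_ANY : 0;

	return cur_vlan;
}

int main(void)
{
	/* untrusted VF, pruning on, no VLAN filters: "any" collapses to 0 */
	printf("%d\n", vf_new_vlan(VLAN_ANY, 0, 0, false, true));
	/* trusted VF, no VLAN filters: untagged widens to "any" (-1) */
	printf("%d\n", vf_new_vlan(0, 0, 0, true, true));
	/* port VLAN 100 configured: every filter is forced to 100 */
	printf("%d\n", vf_new_vlan(VLAN_ANY, 100, 0, false, true));
	return 0;
}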
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 61e5789d78db..2d3533f38d7b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -27,7 +27,6 @@
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
-#define to_dev(obj) container_of(obj, struct device, kobj)
enum i40e_ptp_pin {
SDP3_2 = 0,
@@ -335,44 +334,37 @@ static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
}
/**
- * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * i40e_ptp_adjfine - Adjust the PHC frequency
* @ptp: The PTP clock structure
- * @ppb: Parts per billion adjustment from the base
+ * @scaled_ppm: Scaled parts per million adjustment from base
*
- * Adjust the frequency of the PHC by the indicated parts per billion from the
- * base frequency.
+ * Adjust the frequency of the PHC by the indicated delta from the base
+ * frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
**/
-static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct i40e_hw *hw = &pf->hw;
u64 adj, freq, diff;
int neg_adj = 0;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- freq = I40E_PTP_40GB_INCVAL;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ smp_mb(); /* Force any pending update before accessing. */
+ freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
+ diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
adj = I40E_PTP_40GB_INCVAL - diff;
else
adj = I40E_PTP_40GB_INCVAL + diff;
- /* At some link speeds, the base incval is so large that directly
- * multiplying by ppb would result in arithmetic overflow even when
- * using a u64. Avoid this by instead calculating the new incval
- * always in terms of the 40GbE clock rate and then multiplying by the
- * link speed factor afterwards. This does result in slightly lower
- * precision at lower link speeds, but it is fairly minor.
- */
- smp_mb(); /* Force any pending update before accessing. */
- adj *= READ_ONCE(pf->ptp_adj_mult);
-
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
@@ -1402,7 +1394,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
- pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjfine = i40e_ptp_adjfine;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
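Folding the link-speed multiplier into freq before applying scaled_ppm is only safe because mul_u64_u64_div_u64() performs the multiply in 128 bits; a plain u64 product of the scaled base increment and a large scaled_ppm would wrap. A rough standalone illustration of the magnitudes involved; the increment and multiplier are approximate stand-ins, not the exact i40e constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t base_incval = 5000000000ULL;	/* ~order of the 40GbE incval */
	uint64_t mult = 40;			/* assumed link-speed factor */
	uint64_t scaled_ppm = 999999ULL << 16;	/* a large adjustment */
	uint64_t freq = base_incval * mult;

	/* freq * scaled_ppm is ~1.3e22, far beyond UINT64_MAX (~1.8e19), so
	 * a naive u64 multiply would wrap; a 128-bit intermediate does not.
	 * (unsigned __int128 is a GCC/Clang extension.)
	 */
	unsigned __int128 wide = (unsigned __int128)freq * scaled_ppm;
	uint64_t diff = (uint64_t)(wide / (1000000ULL << 16));

	printf("diff = %llu\n", (unsigned long long)diff);
	return 0;
}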
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 7bc1174edf6b..f6ba97a0166e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -372,7 +372,6 @@ static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
}
}
-#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
@@ -1483,10 +1482,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
@@ -2291,16 +2288,14 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* i40e_run_xdp - run an XDP program
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
**/
-static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
if (!xdp_prog)
goto xdp_out;
@@ -2445,6 +2440,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int offset = rx_ring->rx_offset;
struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
struct xdp_buff xdp;
int xdp_res = 0;
@@ -2454,6 +2450,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
@@ -2509,11 +2507,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
}
if (xdp_res) {
@@ -3713,35 +3712,55 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring)
{
- u16 i = xdp_ring->next_to_use;
- struct i40e_tx_buffer *tx_bi;
- struct i40e_tx_desc *tx_desc;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
+ struct i40e_tx_buffer *tx_bi = tx_head;
+ struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
void *data = xdpf->data;
u32 size = xdpf->len;
- dma_addr_t dma;
- if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
+ if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return I40E_XDP_CONSUMED;
- tx_bi = &xdp_ring->tx_bi[i];
- tx_bi->bytecount = size;
- tx_bi->gso_segs = 1;
- tx_bi->xdpf = xdpf;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record length, and DMA address */
- dma_unmap_len_set(tx_bi, len, size);
- dma_unmap_addr_set(tx_bi, dma, dma);
+ for (;;) {
+ dma_addr_t dma;
- tx_desc = I40E_TX_DESC(xdp_ring, i);
- tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
- | I40E_TXD_CMD,
- 0, size, 0);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ goto unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
+
+ if (++index == xdp_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_bi = &xdp_ring->tx_bi[index];
+ tx_desc = I40E_TX_DESC(xdp_ring, index);
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ size = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -3749,14 +3768,30 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
smp_wmb();
xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count)
- i = 0;
- tx_bi->next_to_watch = tx_desc;
- xdp_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = index;
return I40E_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_bi = &xdp_ring->tx_bi[index];
+ if (dma_unmap_len(tx_bi, len))
+ dma_unmap_page(xdp_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+ if (tx_bi == tx_head)
+ break;
+
+ if (!index)
+ index += xdp_ring->count;
+ index--;
+ }
+
+ return I40E_XDP_CONSUMED;
}
/**
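With multi-buffer XDP, the frame head and each fragment get their own Tx descriptor, and a DMA mapping failure part-way through has to unwind everything mapped so far: the unmap loop walks backwards from the failing slot to the head slot, wrapping at the ring size. A standalone sketch of that backwards walk over a ring-shaped array (no real DMA, just the index arithmetic):

#include <stdio.h>

#define RING_SIZE 8

struct slot { unsigned int len; };	/* non-zero len means "mapped" */

/* Undo mappings from 'index' back to (and including) 'head', wrapping
 * around the ring the same way the driver's unmap loop does.
 */
static void unwind(struct slot *ring, unsigned int head, unsigned int index)
{
	for (;;) {
		if (ring[index].len) {
			printf("unmapping slot %u (len %u)\n",
			       index, ring[index].len);
			ring[index].len = 0;
		}
		if (index == head)
			break;
		if (!index)
			index += RING_SIZE;
		index--;
	}
}

int main(void)
{
	struct slot ring[RING_SIZE] = {0};
	unsigned int head = 6, index = head;

	/* Pretend the head and one fragment mapped successfully... */
	ring[index].len = 256; index = (index + 1) % RING_SIZE;
	ring[index].len = 512; index = (index + 1) % RING_SIZE;
	/* ...and the next fragment's mapping failed at 'index'. */
	unwind(ring, head, index);
	return 0;
}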
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 86b0f21287dc..4f184c50f6e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4353,6 +4353,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
+ i40e_vlan_stripping_enable(vsi);
i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -4368,7 +4369,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* MAC addresses deleted.
*/
if ((!(vlan_id || qos) ||
- vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
vsi->info.pvid) {
ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
if (ret) {
@@ -4731,6 +4732,11 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
vf->trusted = setting;
+
+ /* request PF to sync mac/vlan filters for the VF */
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index af3e7e6afc85..6d4009e0cbd6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -143,20 +143,17 @@ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
* i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
*
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
-static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- /* NB! xdp_prog will always be !NULL, due to the fact that
- * this path is enabled by setting an XDP program.
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -339,9 +336,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
u16 cleaned_count;
+ /* NB! xdp_prog will always be !NULL, due to the fact that
+ * this path is enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
unsigned int rx_packets;
@@ -378,7 +381,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
&rx_bytes, size, xdp_res, &failure);
if (failure)
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 0ea0361cd86b..3f6187c16424 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -92,6 +92,7 @@ struct iavf_vsi {
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA 50
#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
(IAVF_MAX_VF_VSI * \
@@ -145,7 +146,8 @@ struct iavf_mac_filter {
u8 remove:1; /* filter needs to be removed */
u8 add:1; /* filter needs to be added */
u8 is_primary:1; /* filter is a default VF MAC */
- u8 padding:4;
+ u8 add_handled:1; /* received response for filter add */
+ u8 padding:3;
};
};
@@ -251,6 +253,7 @@ struct iavf_adapter {
struct work_struct adminq_task;
struct delayed_work client_task;
wait_queue_head_t down_waitqueue;
+ wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
struct list_head mac_filter_list;
@@ -295,6 +298,7 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_INITIAL_MAC_SET BIT(23)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
@@ -430,6 +434,11 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+ /* snapshot of "num_active_queues" before setup_tc for qdisc add
+ * is invoked. This information is useful during qdisc del flow,
+ * to restore correct number of queues
+ */
+ int orig_num_active_queues;
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
@@ -567,6 +576,8 @@ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac);
void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
netdev_features_t prev_features,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2e2c153ce46a..45d097a164ad 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -978,6 +978,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->add_handled = false;
f->is_new_mac = true;
f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
@@ -989,47 +990,132 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
}
/**
- * iavf_set_mac - NDO callback to set port mac address
- * @netdev: network interface device structure
- * @p: pointer to an address structure
+ * iavf_replace_primary_mac - Replace current primary address
+ * @adapter: board private structure
+ * @new_mac: new MAC address to be applied
*
- * Returns 0 on success, negative on failure
+ * Replace current dev_addr and send request to PF for removal of previous
+ * primary MAC address filter and addition of new primary MAC filter.
+ * Return 0 for success, -ENOMEM for failure.
+ *
+ * Do not call this with mac_vlan_list_lock!
**/
-static int iavf_set_mac(struct net_device *netdev, void *p)
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac)
{
- struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
- return 0;
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->is_primary = false;
+ }
+
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
- f->is_primary = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
- f = iavf_add_filter(adapter, addr->sa_data);
+ f = iavf_add_filter(adapter, new_mac);
+
if (f) {
+ /* Always send the request to add if changing primary MAC
+ * even if filter is already present on the list
+ */
f->is_primary = true;
- ether_addr_copy(hw->mac.addr, addr->sa_data);
+ f->add = true;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
+ ether_addr_copy(hw->mac.addr, new_mac);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
- if (f)
+ if (f) {
queue_work(iavf_wq, &adapter->watchdog_task.work);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * iavf_is_mac_set_handled - check if the PF has handled a set MAC request
+ * @netdev: network interface device structure
+ * @macaddr: MAC address to set
+ *
+ * Returns true if the set MAC request for @macaddr has been handled, false otherwise
+ */
+static bool iavf_is_mac_set_handled(struct net_device *netdev,
+ const u8 *macaddr)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_mac_filter *f;
+ bool ret = false;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ f = iavf_find_filter(adapter, macaddr);
+
+ if (!f || (!f->add && f->add_handled))
+ ret = true;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ return ret;
+}
+
+/**
+ * iavf_set_mac - NDO callback to set port MAC address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int iavf_set_mac(struct net_device *netdev, void *p)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
+ int ret;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = iavf_replace_primary_mac(adapter, addr->sa_data);
+
+ if (ret)
+ return ret;
+
+ /* If this is an initial set MAC during VF spawn do not wait */
+ if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
+ adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
+ return 0;
+ }
+
+ if (handle_mac)
+ goto done;
+
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+
+ /* If ret < 0, the wait was interrupted.
+ * If ret == 0, the wait timed out.
+ * Otherwise we received a response for set MAC from the PF;
+ * check whether the netdev MAC was updated to the requested MAC.
+ * If it was, the set MAC succeeded; otherwise it failed and we
+ * return -EACCES.
+ */
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return -EAGAIN;
- return (f == NULL) ? -ENOMEM : 0;
+done:
+ if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return -EACCES;
+
+ return 0;
}
/**
@@ -2445,6 +2531,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
+ adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
+
adapter->tx_desc_count = IAVF_DEFAULT_TXD;
adapter->rx_desc_count = IAVF_DEFAULT_RXD;
err = iavf_init_interrupt_scheme(adapter);
@@ -3322,6 +3410,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
+ u32 tx_rate_rem = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
@@ -3336,12 +3425,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
return -EINVAL;
if (mqprio_qopt->min_rate[i]) {
dev_err(&adapter->pdev->dev,
- "Invalid min tx rate (greater than 0) specified\n");
+ "Invalid min tx rate (greater than 0) specified for TC%d\n",
+ i);
return -EINVAL;
}
- /*convert to Mbps */
+
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
IAVF_MBPS_DIVISOR);
+
+ if (mqprio_qopt->max_rate[i] &&
+ tx_rate < IAVF_MBPS_QUANTA) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, minimum %dMbps\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
+ (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+ if (tx_rate_rem != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, not divisible by %d\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
@@ -3408,6 +3517,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
netif_tx_disable(netdev);
iavf_del_all_cloud_filters(adapter);
adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
+ total_qps = adapter->orig_num_active_queues;
goto exit;
} else {
return -EINVAL;
@@ -3451,7 +3561,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
adapter->ch_config.ch_info[i].offset = 0;
}
}
+
+ /* Take a snapshot of the original configuration, such as
+ * "num_active_queues", so that when the delete ADQ flow is
+ * exercised the VF can go back to its original queue
+ * configuration once it completes.
+ */
+
+ adapter->orig_num_active_queues = adapter->num_active_queues;
+
+ /* Store queue info based on TC so that VF gets configured
+ * with correct number of queues when VF completes ADQ config
+ * flow
+ */
adapter->ch_config.total_qps = total_qps;
+
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
@@ -3468,6 +3592,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
}
}
exit:
+ if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return 0;
+
+ netif_set_real_num_rx_queues(netdev, total_qps);
+ netif_set_real_num_tx_queues(netdev, total_qps);
+
return ret;
}
@@ -3744,6 +3874,29 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
}
/**
+ * iavf_find_cf - Find the cloud filter in the list
+ * @adapter: Board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
+ unsigned long *cookie)
+{
+ struct iavf_cloud_filter *filter = NULL;
+
+ if (!cookie)
+ return NULL;
+
+ list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ }
+ return NULL;
+}
+
+/**
* iavf_configure_clsflower - Add tc flower filters
* @adapter: board private structure
* @cls_flower: Pointer to struct flow_cls_offload
@@ -3774,6 +3927,15 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter->cookie = cls_flower->cookie;
+ /* bail out here if filter already exists */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ if (iavf_find_cf(adapter, &cls_flower->cookie)) {
+ dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
+ err = -EEXIST;
+ goto spin_unlock;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
/* set the mask to all zeroes to begin with */
memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
/* start out with flow type and eth type IPv4 to begin with */
@@ -3792,6 +3954,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
adapter->num_cloud_filters++;
filter->add = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+spin_unlock:
spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
if (err)
@@ -3801,28 +3964,6 @@ err:
return err;
}
-/* iavf_find_cf - Find the cloud filter in the list
- * @adapter: Board private structure
- * @cookie: filter specific cookie
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * cloud_filter_list_lock.
- */
-static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
- unsigned long *cookie)
-{
- struct iavf_cloud_filter *filter = NULL;
-
- if (!cookie)
- return NULL;
-
- list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
- if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
- return filter;
- }
- return NULL;
-}
-
/**
* iavf_delete_clsflower - Remove tc flower filters
* @adapter: board private structure
@@ -4159,7 +4300,7 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
@@ -4678,6 +4819,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
+ /* Setup the wait queue for indicating virtchannel events */
+ init_waitqueue_head(&adapter->vc_waitqueue);
+
return 0;
err_ioremap:
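The new set-MAC path queues the filter change and then sleeps on vc_waitqueue with a plain 2.5 s timeout; since the wait condition is literally false, only a wake_up() from the virtchnl completion handler, a signal, or the timeout ends the sleep, after which the netdev address is re-checked to decide success. A rough userspace analogue of that request/response handshake built on a condition variable; only the names and the timeout budget mirror the driver, the rest is illustrative:

#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t vc_waitqueue = PTHREAD_COND_INITIALIZER;
static int response_received;

/* Stand-in for the virtchnl completion handler running in another context. */
static void *pf_response_thread(void *arg)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	(void)arg;
	nanosleep(&delay, NULL);		/* the PF "processes" the request */
	pthread_mutex_lock(&lock);
	response_received = 1;
	pthread_cond_signal(&vc_waitqueue);	/* like wake_up(&adapter->vc_waitqueue) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t pf;
	struct timespec deadline;
	int err = 0;

	pthread_create(&pf, NULL, pf_response_thread, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 3;			/* roughly the driver's 2500 ms budget */

	pthread_mutex_lock(&lock);
	while (!response_received && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&vc_waitqueue, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	/* Like iavf_set_mac(): a timeout means the PF never answered. */
	puts(response_received ? "PF handled set MAC" : "timed out");
	pthread_join(pf, NULL);
	return 0;
}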
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1603e99bae4a..15ee85dc33bd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -5,10 +5,6 @@
#include "iavf_prototype.h"
#include "iavf_client.h"
-/* busy wait delay in msec */
-#define IAVF_BUSY_WAIT_DELAY 10
-#define IAVF_BUSY_WAIT_COUNT 50
-
/**
* iavf_send_pf_msg
* @adapter: adapter structure
@@ -598,6 +594,8 @@ static void iavf_mac_add_ok(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -618,6 +616,9 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
+
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
@@ -1970,6 +1971,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
@@ -2134,7 +2136,13 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ if (!ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr)) {
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+ }
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -2164,10 +2172,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
/* restore current mac address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
+ netif_addr_lock_bh(netdev);
/* refresh current mac address if changed */
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
@@ -2203,6 +2212,10 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
fallthrough;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
+ struct iavf_mac_filter *f;
+ bool was_mac_changed;
+ u64 aq_required = 0;
+
if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
memcpy(&adapter->vlan_v2_caps, msg,
min_t(u16, msglen,
@@ -2210,6 +2223,46 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ was_mac_changed = !ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr);
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ /* re-add all MAC filters */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (was_mac_changed &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ ether_addr_copy(f->macaddr,
+ adapter->hw.mac.addr);
+
+ f->is_new_mac = true;
+ f->add = true;
+ f->add_handled = false;
+ f->remove = false;
+ }
+
+ /* re-add all VLAN filters */
+ if (VLAN_FILTERING_ALLOWED(adapter)) {
+ struct iavf_vlan_filter *vlf;
+
+ if (!list_empty(&adapter->vlan_filter_list)) {
+ list_for_each_entry(vlf,
+ &adapter->vlan_filter_list,
+ list)
+ vlf->add = true;
+
+ aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ aq_required;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 60453b3b8d23..cc5b85afd437 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,6 +52,7 @@
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
+#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -181,6 +182,7 @@
enum ice_feature {
ICE_F_DSCP,
+ ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
ICE_F_GNSS,
ICE_F_MAX
@@ -246,8 +248,6 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
- struct ice_vsi *dflt_vsi; /* default VSI for this switch */
- u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_pf_state {
@@ -544,8 +544,8 @@ struct ice_pf {
u32 msg_enable;
struct ice_ptp ptp;
struct tty_driver *ice_gnss_tty_driver;
- struct tty_port gnss_tty_port;
- struct gnss_serial *gnss_serial;
+ struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES];
+ struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES];
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 05cb9dd7035a..9939238573a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1395,7 +1395,7 @@ struct ice_aqc_get_link_topo {
u8 rsvd[9];
};
-/* Read I2C (direct, 0x06E2) */
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
struct ice_aqc_i2c {
struct ice_aqc_link_topo_addr topo_addr;
__le16 i2c_addr;
@@ -1405,7 +1405,7 @@ struct ice_aqc_i2c {
u8 rsvd;
__le16 i2c_bus_addr;
- u8 rsvd2[4];
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
};
/* Read I2C Response (direct, 0x06E2) */
@@ -2124,7 +2124,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
- struct ice_aqc_i2c read_i2c;
+ struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
} params;
};
@@ -2241,6 +2241,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
+ ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 9619bdb9e49a..27d0cbbd29da 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4823,7 +4823,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
- cmd = &desc.params.read_i2c;
+ cmd = &desc.params.read_write_i2c;
if (!data)
return -EINVAL;
@@ -4851,6 +4851,51 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_aq_write_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Write I2C (0x06E3)
+ *
+ * Return:
+ * * 0 - Successful write to the i2c device
+ * * -EINVAL - Data size greater than 4 bytes
+ * * -EIO - FW error
+ */
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return -EINVAL;
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ memcpy(cmd->i2c_data, data, data_size);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
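Per the kernel-doc above, the write length travels in the low four bits of @params and the command buffer only holds four data bytes, which is why data_size is capped at 4. A standalone sketch of that packing and check; the bit layout (bits [3:0] = size, bit [4] = address type) comes from the comment, while the mask names here are invented for the example:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define I2C_DATA_SIZE_MASK 0x0F	/* bits [3:0]: number of bytes to write */
#define I2C_ADDR_TYPE_BIT  0x10	/* bit [4]: I2C address type */

struct i2c_write_cmd {
	uint8_t params;
	uint8_t data[4];
};

/* Pack a write command, rejecting payloads the 4-byte field cannot hold. */
static int build_i2c_write(struct i2c_write_cmd *cmd, const uint8_t *data,
			   uint8_t size, int wide_addr)
{
	if (size > 4)
		return -1;	/* mirrors the driver's data_size > 4 check */

	cmd->params = size & I2C_DATA_SIZE_MASK;
	if (wide_addr)
		cmd->params |= I2C_ADDR_TYPE_BIT;
	memcpy(cmd->data, data, size);
	return 0;
}

int main(void)
{
	struct i2c_write_cmd cmd;
	uint8_t payload[2] = { 0xB5, 0x62 };

	printf("%d\n", build_i2c_write(&cmd, payload, 2, 1));	/* 0: ok */
	printf("%d\n", build_i2c_write(&cmd, payload, 5, 1));	/* -1: too big */
	return 0;
}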
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 872ea7d2332d..61b7c60db689 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -214,5 +214,9 @@ int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 6a463b242c7d..e35371e61e07 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -133,8 +133,8 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
- if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
rule_added = true;
}
@@ -151,7 +151,7 @@ err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -411,7 +411,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 4efa5e5846e0..a6fff8ebaf9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1293,7 +1293,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* promiscuous mode because it's not supported
*/
if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
- ice_is_any_vf_in_promisc(pf)) {
+ ice_is_any_vf_in_unicast_promisc(pf)) {
dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
/* toggle bit back to previous state */
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 5d10c4f84a36..ead6d50fc0ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -852,7 +852,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1214,7 +1214,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index c73cdab44f70..4b3bb19e1d06 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1964,8 +1964,11 @@ ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
}
}
} while (fv);
- if (list_empty(fv_list))
+ if (list_empty(fv_list)) {
+ dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package");
return -EIO;
+ }
+
return 0;
err:
@@ -2639,7 +2642,7 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
*
* This function will either add or move a ptype to a particular PTG depending
* on if the ptype is already part of another group. Note that using a
- * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
static int
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 57586a2e6dec..b5a7f246d230 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -1,11 +1,104 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018-2021, Intel Corporation. */
+/* Copyright (C) 2021-2022, Intel Corporation. */
#include "ice.h"
#include "ice_lib.h"
#include <linux/tty_driver.h>
/**
+ * ice_gnss_do_write - Write data to internal GNSS
+ * @pf: board private structure
+ * @buf: command buffer
+ * @size: command buffer size
+ *
+ * Write UBX command data to the GNSS receiver
+ */
+static unsigned int
+ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ struct ice_hw *hw = &pf->hw;
+ unsigned int offset = 0;
+ int err = 0;
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ /* It's not possible to write a single byte to u-blox.
+ * Write all bytes in a loop until there are 6 or fewer bytes left. If
+ * there are exactly 6 bytes left, the last write would be only a byte.
+ * In this case, do 4+2 byte writes instead of 5+1. Otherwise, do the
+ * last write of 2 to 5 bytes.
+ */
+ while (size - offset > ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES;
+ }
+
+ /* Single byte would be written. Write 4 bytes instead of 5. */
+ if (size - offset == ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES - 1;
+ }
+
+ /* Do the last write, 2 to 5 bytes. */
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]), size - offset - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ return size;
+
+err_out:
+ dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
+ offset, size, err);
+
+ return offset;
+}
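
As a worked example of the chunking rule above (each AQ write carries one address byte plus up to ICE_MAX_I2C_WRITE_BYTES data bytes, i.e. 5 bytes per full write), a 16-byte UBX command is split as follows; this is a sketch of the arithmetic only:

/* 16 left -> write 5 (11 left)
 * 11 left -> write 5 ( 6 left, loop exits since 6 is not > 6)
 *  6 left -> write 4 ( 2 left, the 4+2 tail avoids a 1-byte write)
 *  2 left -> final write of 2
 */
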
+
+/**
+ * ice_gnss_write_pending - Write all pending data to internal GNSS
+ * @work: GNSS write work structure
+ */
+static void ice_gnss_write_pending(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ write_work);
+ struct ice_pf *pf = gnss->back;
+
+ if (!list_empty(&gnss->queue)) {
+ struct gnss_write_buf *write_buf = NULL;
+ unsigned int bytes;
+
+ write_buf = list_first_entry(&gnss->queue,
+ struct gnss_write_buf, queue);
+
+ bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
+ dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
+
+ list_del(&write_buf->queue);
+ kfree(write_buf->buf);
+ kfree(write_buf);
+ }
+}
+
+/**
* ice_gnss_read - Read data from internal GNSS module
* @work: GNSS read work structure
*
@@ -17,13 +110,13 @@ static void ice_gnss_read(struct kthread_work *work)
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
read_work.work);
struct ice_aqc_link_topo_addr link_topo;
- u8 i2c_params, bytes_read;
+ unsigned int i, bytes_read, data_len;
struct tty_port *port;
struct ice_pf *pf;
struct ice_hw *hw;
__be16 data_len_b;
char *buf = NULL;
- u16 i, data_len;
+ u8 i2c_params;
int err = 0;
pf = gnss->back;
@@ -65,7 +158,7 @@ static void ice_gnss_read(struct kthread_work *work)
mdelay(10);
}
- data_len = min(data_len, (u16)PAGE_SIZE);
+ data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
data_len = tty_buffer_request_room(port, data_len);
if (!data_len) {
err = -ENOMEM;
@@ -74,9 +167,10 @@ static void ice_gnss_read(struct kthread_work *work)
/* Read received data */
for (i = 0; i < data_len; i += bytes_read) {
- u16 bytes_left = data_len - i;
+ unsigned int bytes_left = data_len - i;
- bytes_read = min_t(typeof(bytes_left), bytes_left, ICE_MAX_I2C_DATA_SIZE);
+ bytes_read = min_t(typeof(bytes_left), bytes_left,
+ ICE_MAX_I2C_DATA_SIZE);
err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
@@ -103,8 +197,9 @@ exit:
/**
* ice_gnss_struct_init - Initialize GNSS structure for the TTY
* @pf: Board private structure
+ * @index: TTY device index
*/
-static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
{
struct device *dev = ice_pf_to_dev(pf);
struct kthread_worker *kworker;
@@ -117,9 +212,11 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
mutex_init(&gnss->gnss_mutex);
gnss->open_count = 0;
gnss->back = pf;
- pf->gnss_serial = gnss;
+ pf->gnss_serial[index] = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
+ INIT_LIST_HEAD(&gnss->queue);
+ kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
/* Allocate a kworker for handling work required for the GNSS TTY
* writes.
*/
@@ -155,10 +252,10 @@ static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = NULL;
/* Get the serial object associated with this tty pointer */
- gnss = pf->gnss_serial;
+ gnss = pf->gnss_serial[tty->index];
if (!gnss) {
/* Initialize GNSS struct on the first device open */
- gnss = ice_gnss_struct_init(pf);
+ gnss = ice_gnss_struct_init(pf, tty->index);
if (!gnss)
return -ENOMEM;
}
@@ -211,25 +308,100 @@ exit:
}
/**
- * ice_gnss_tty_write - Dummy TTY write function to avoid kernel panic
+ * ice_gnss_tty_write - Write GNSS data
* @tty: pointer to the tty_struct
* @buf: pointer to the user data
- * @cnt: the number of characters that was able to be sent to the hardware (or
- * queued to be sent at a later time)
+ * @count: the number of characters queued to be sent to the HW
+ *
+ * The write function is called by the user when there is data to be sent
+ * to the hardware. The tty core receives the call first and then passes the
+ * data on to the tty driver's write function, along with the size of the
+ * data being sent.
+ * If any error happens during the write call, a negative error value is
+ * returned instead of the number of characters queued to be written.
*/
static int
-ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int cnt)
+ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
- return 0;
+ struct gnss_write_buf *write_buf;
+ struct gnss_serial *gnss;
+ unsigned char *cmd_buf;
+ struct ice_pf *pf;
+ int err = count;
+
+ /* We cannot write a single byte using our I2C implementation. */
+ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
+ return -EINVAL;
+
+ gnss = tty->driver_data;
+ if (!gnss)
+ return -EFAULT;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return -EFAULT;
+
+ /* Only allow to write on TTY 0 */
+ if (gnss != pf->gnss_serial[0])
+ return -EIO;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
+ if (!cmd_buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(cmd_buf, buf, count);
+
+ /* Send the data out to a hardware port */
+ write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
+ if (!write_buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ write_buf->buf = cmd_buf;
+ write_buf->size = count;
+ INIT_LIST_HEAD(&write_buf->queue);
+ list_add_tail(&write_buf->queue, &gnss->queue);
+ kthread_queue_work(gnss->kworker, &gnss->write_work);
+exit:
+ mutex_unlock(&gnss->gnss_mutex);
+ return err;
}
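
A hypothetical user-space sketch of exercising this path; the device node name is illustrative only (the real name is derived from the adapter), and per the checks above the write must be 2 to ICE_GNSS_TTY_WRITE_BUF bytes and go to the first, read-write minor:

#include <fcntl.h>
#include <unistd.h>

/* Send a raw UBX command to the GNSS TTY; returns 0 on success. */
static int send_ubx(const unsigned char *cmd, size_t len)
{
        int fd = open("/dev/ttyGNSS_0000_0", O_WRONLY); /* illustrative name */
        ssize_t n;

        if (fd < 0)
                return -1;
        n = write(fd, cmd, len);
        close(fd);
        return n == (ssize_t)len ? 0 : -1;
}
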
/**
- * ice_gnss_tty_write_room - Dummy TTY write_room function to avoid kernel panic
+ * ice_gnss_tty_write_room - Returns the number of characters that can be written
* @tty: pointer to the tty_struct
+ *
+ * This routine returns the number of characters the tty driver will accept
+ * for queuing to be written, or 0 if either the TTY is not open or the user
+ * tries to write to a TTY other than the first.
*/
static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
{
- return 0;
+ struct gnss_serial *gnss = tty->driver_data;
+
+ /* Only allow to write on TTY 0 */
+ if (!gnss || gnss != gnss->back->gnss_serial[0])
+ return 0;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ mutex_unlock(&gnss->gnss_mutex);
+ return 0;
+ }
+
+ mutex_unlock(&gnss->gnss_mutex);
+ return ICE_GNSS_TTY_WRITE_BUF;
}
static const struct tty_operations tty_gps_ops = {
@@ -249,11 +421,13 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
const int ICE_TTYDRV_NAME_MAX = 14;
struct tty_driver *tty_driver;
char *ttydrv_name;
+ unsigned int i;
int err;
- tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
+ tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES,
+ TTY_DRIVER_REAL_RAW);
if (IS_ERR(tty_driver)) {
- dev_err(ice_pf_to_dev(pf), "Failed to allocate memory for GNSS TTY\n");
+ dev_err(dev, "Failed to allocate memory for GNSS TTY\n");
return NULL;
}
@@ -283,23 +457,32 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
tty_driver->driver_state = pf;
tty_set_operations(tty_driver, &tty_gps_ops);
- pf->gnss_serial = NULL;
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
+ GFP_KERNEL);
+ pf->gnss_serial[i] = NULL;
- tty_port_init(&pf->gnss_tty_port);
- tty_port_link_device(&pf->gnss_tty_port, tty_driver, 0);
+ tty_port_init(pf->gnss_tty_port[i]);
+ tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i);
+ }
err = tty_register_driver(tty_driver);
if (err) {
- dev_err(ice_pf_to_dev(pf), "Failed to register TTY driver err=%d\n",
- err);
+ dev_err(dev, "Failed to register TTY driver err=%d\n", err);
- tty_port_destroy(&pf->gnss_tty_port);
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
kfree(ttydrv_name);
tty_driver_kref_put(pf->ice_gnss_tty_driver);
return NULL;
}
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
+ dev_info(dev, "%s%d registered\n", ttydrv_name, i);
+
return tty_driver;
}
@@ -327,17 +510,25 @@ void ice_gnss_init(struct ice_pf *pf)
*/
void ice_gnss_exit(struct ice_pf *pf)
{
+ unsigned int i;
+
if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
return;
- tty_port_destroy(&pf->gnss_tty_port);
+ for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ if (pf->gnss_tty_port[i]) {
+ tty_port_destroy(pf->gnss_tty_port[i]);
+ kfree(pf->gnss_tty_port[i]);
+ }
- if (pf->gnss_serial) {
- struct gnss_serial *gnss = pf->gnss_serial;
+ if (pf->gnss_serial[i]) {
+ struct gnss_serial *gnss = pf->gnss_serial[i];
- kthread_cancel_delayed_work_sync(&gnss->read_work);
- kfree(gnss);
- pf->gnss_serial = NULL;
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kfree(gnss);
+ pf->gnss_serial[i] = NULL;
+ }
}
tty_unregister_driver(pf->ice_gnss_tty_driver);
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 9211adb2372c..f454dd1d9285 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
+/* Copyright (C) 2021-2022, Intel Corporation. */
#ifndef _ICE_GNSS_H_
#define _ICE_GNSS_H_
@@ -8,14 +8,34 @@
#include <linux/tty_flip.h>
#define ICE_E810T_GNSS_I2C_BUS 0x2
+#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
+/* Create 2 minor devices, both using the same GNSS module. The first one is
+ * RW, the second one is RO.
+ */
+#define ICE_GNSS_TTY_MINOR_DEVICES 2
+#define ICE_GNSS_TTY_WRITE_BUF 250
+#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+#define ICE_MAX_I2C_WRITE_BYTES 4
+
+/* u-blox ZED-F9T specific definitions */
#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
/* Data length register is big endian */
#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
-#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
-#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+/* For u-blox, writes are performed without an address, so the first byte to
+ * write is passed as the I2C addr parameter.
+ */
+#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
#define ICE_MAX_UBX_READ_TRIES 255
+#define ICE_MAX_UBX_ACK_READ_TRIES 4095
+
+struct gnss_write_buf {
+ struct list_head queue;
+ unsigned int size;
+ unsigned char *buf;
+};
+
/**
* struct gnss_serial - data used to initialize GNSS TTY port
@@ -25,6 +45,8 @@
* @gnss_mutex: gnss_mutex used to protect GNSS serial operations
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
+ * @write_work: write_work function for handling GNSS writes
+ * @queue: write buffers queue
*/
struct gnss_serial {
struct ice_pf *back;
@@ -33,6 +55,8 @@ struct gnss_serial {
struct mutex gnss_mutex; /* protects GNSS serial structure */
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
+ struct kthread_work write_work;
+ struct list_head queue;
};
#if IS_ENABLED(CONFIG_TTY)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4f954db01b92..c9f7393b783d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -447,11 +447,9 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- if (lag->upper_netdev)
- dev_put(lag->upper_netdev);
+ dev_put(lag->upper_netdev);
- if (lag->peer_netdev)
- dev_put(lag->peer_netdev);
+ dev_put(lag->peer_netdev);
kfree(lag);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index f7f9c973ec54..a830f7f9aed0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -887,6 +887,9 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ ctxt->info.outer_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
}
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
@@ -2419,7 +2422,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
agg_id);
return;
}
- /* aggregator node is created, store the neeeded info */
+ /* aggregator node is created, store the needed info */
agg_node->valid = true;
agg_node->agg_id = agg_id;
}
@@ -3003,8 +3006,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi))
+ ice_clear_dflt_vsi(vsi);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -3687,116 +3690,97 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
- * @sw: switch to check if its default forwarding VSI is free
+ * @pi: port info of the switch with default VSI
*
- * Return true if the default forwarding VSI is already being used, else returns
- * false signalling that it's available to use.
+ * Return true if there is any VSI in the default forwarding VSI list
*/
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
- return (sw->dflt_vsi && sw->dflt_vsi_ena);
+ bool exists = false;
+
+ ice_check_if_dflt_vsi(pi, 0, &exists);
+ return exists;
}
/**
* ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
- * @sw: switch for the default forwarding VSI to compare against
* @vsi: VSI to compare against default forwarding VSI
*
* If this VSI passed in is the default forwarding VSI then return true, else
* return false
*/
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
- return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+ return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}
/**
* ice_set_dflt_vsi - set the default forwarding VSI
- * @sw: switch used to assign the default forwarding VSI
* @vsi: VSI getting set as the default forwarding VSI on the switch
*
* If the VSI passed in is already the default VSI and it's enabled just return
* success.
*
- * If there is already a default VSI on the switch and it's enabled then return
- * -EEXIST since there can only be one default VSI per switch.
- *
- * Otherwise try to set the VSI passed in as the switch's default VSI and
- * return the result.
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
*/
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
struct device *dev;
int status;
- if (!sw || !vsi)
+ if (!vsi)
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
/* the VSI passed in is already the default VSI */
- if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
vsi->vsi_num);
return 0;
}
- /* another VSI is already the default VSI for this switch */
- if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
- sw->dflt_vsi->vsi_num);
- return -EEXIST;
- }
-
- status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return status;
}
- sw->dflt_vsi = vsi;
- sw->dflt_vsi_ena = true;
-
return 0;
}
/**
* ice_clear_dflt_vsi - clear the default forwarding VSI
- * @sw: switch used to clear the default VSI
+ * @vsi: VSI to remove from filter list
*
* If the switch has no default VSI or it's not enabled then return error.
*
* Otherwise try to clear the default VSI and return the result.
*/
-int ice_clear_dflt_vsi(struct ice_sw *sw)
+int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
- struct ice_vsi *dflt_vsi;
struct device *dev;
int status;
- if (!sw)
+ if (!vsi)
return -EINVAL;
- dev = ice_pf_to_dev(sw->pf);
-
- dflt_vsi = sw->dflt_vsi;
+ dev = ice_pf_to_dev(vsi->back);
/* there is no default VSI configured */
- if (!ice_is_dflt_vsi_in_use(sw))
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info))
return -ENODEV;
- status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ vsi->vsi_num, status);
return -EIO;
}
- sw->dflt_vsi = NULL;
- sw->dflt_vsi_ena = false;
-
return 0;
}
@@ -4179,6 +4163,7 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
+ ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
if (ice_is_e810t(&pf->hw)) {
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
if (ice_gnss_is_gps_present(&pf->hw))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 0095329949d4..8712b1d2ceec 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -102,13 +102,10 @@ int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
-
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_clear_dflt_vsi(struct ice_sw *sw);
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
+int ice_set_dflt_vsi(struct ice_vsi *vsi);
+int ice_clear_dflt_vsi(struct ice_vsi *vsi);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9f02b60459f1..eb40526ee179 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -410,8 +410,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
- err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
+ err = ice_set_dflt_vsi(vsi);
if (err && err != -EEXIST) {
netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -424,8 +424,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* Clear Rx filter to remove traffic from wire */
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
- err = ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi)) {
+ err = ice_clear_dflt_vsi(vsi);
if (err) {
netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -433,7 +433,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->current_netdev_flags &
+ if (vsi->netdev->features &
NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_ops->ena_rx_filtering(vsi);
}
@@ -3358,6 +3358,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
@@ -5315,12 +5316,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_aer_clear_nonfatal_status(pdev);
- if (err)
- dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
- err);
- /* non-fatal, continue */
-
return result;
}
@@ -5919,6 +5914,32 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev);
+ int ret;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
+ return ret;
+ }
+ }
+ ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ ret = ice_up(vsi);
+
+ return ret;
+}
+
+/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -5926,44 +5947,41 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
- if (ice_is_safe_mode(vsi->back)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
+ if (ice_is_safe_mode(pf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(pf),
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
- if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
- ice_vsi_manage_rss_lut(vsi, true);
- else if (!(features & NETIF_F_RXHASH) &&
- netdev->features & NETIF_F_RXHASH)
- ice_vsi_manage_rss_lut(vsi, false);
+ if (changed & NETIF_F_RXHASH)
+ ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
ret = ice_set_vlan_features(netdev, features);
if (ret)
return ret;
- if ((features & NETIF_F_NTUPLE) &&
- !(netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, true);
- ice_init_arfs(vsi);
- } else if (!(features & NETIF_F_NTUPLE) &&
- (netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, false);
- ice_clear_arfs(vsi);
+ if (changed & NETIF_F_NTUPLE) {
+ bool ena = !!(features & NETIF_F_NTUPLE);
+
+ ice_vsi_manage_fdir(vsi, ena);
+ ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
}
/* don't turn off hw_tc_offload when ADQ is already enabled */
@@ -5972,13 +5990,17 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return -EACCES;
}
- if ((features & NETIF_F_HW_TC) &&
- !(netdev->features & NETIF_F_HW_TC))
- set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- else
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ if (changed & NETIF_F_HW_TC) {
+ bool ena = !!(features & NETIF_F_HW_TC);
- return 0;
+ ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+
+ return ret;
}
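
With NETIF_F_LOOPBACK now advertised in hw_features, this path can presumably be toggled from user space with ethtool -K <iface> loopback on (or off); ice_set_loopback() then brings the interface down around the MAC loopback admin command and back up when it is running, as shown above.
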
/**
@@ -7000,12 +7022,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
- if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
- /* clear the default VSI configuration if it exists */
- pf->first_sw->dflt_vsi = NULL;
- pf->first_sw->dflt_vsi_ena = false;
-
ice_clear_pxe_mode(hw);
err = ice_init_nvm(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 3f64300b0e14..560efc7654c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -43,6 +43,9 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_PPPOE,
+ ICE_VLAN_EX,
+ ICE_VLAN_IN,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -107,15 +110,21 @@ enum ice_prot_id {
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
+#define ICE_PPPOE_HW 103
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
-#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
+
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_VLAN_FLAG_MDID 20
+#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+
#define ICE_TUN_FLAG_FV_IND 2
/* Mapping of software defined protocol ID to hardware defined protocol ID */
@@ -200,6 +209,14 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
+struct ice_pppoe_hdr {
+ u8 rsrvd_ver_type;
+ u8 rsrvd_code;
+ __be16 session_id;
+ __be16 length;
+ __be16 ppp_prot_id; /* control and data only */
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -217,6 +234,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pppoe_hdr pppoe_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ef9344ef0d8e..72b663108a4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1102,9 +1102,8 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- u64 freq, divisor = 1000000ULL;
struct ice_hw *hw = &pf->hw;
- s64 incval, diff;
+ u64 incval, diff;
int neg_adj = 0;
int err;
@@ -1115,17 +1114,8 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
- /* handle overflow by scaling down the scaled_ppm and
- * the divisor, losing some precision
- */
- scaled_ppm >>= 2;
- divisor >>= 2;
- }
-
- freq = (incval * (u64)scaled_ppm) >> 16;
- diff = div_u64(freq, divisor);
-
+ diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
incval -= diff;
else
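
In other words, the single mul_u64_u64_div_u64() call computes diff = incval * scaled_ppm / (1000000 * 2^16) without the old precision-losing shift loop; since scaled_ppm is in units of 2^-16 ppm, scaled_ppm == 65536 (exactly 1 ppm) yields diff = incval / 1000000, as expected.
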
@@ -1900,9 +1890,12 @@ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
info->n_per_out = N_PER_OUT_E810T;
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
+
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+ }
/* Complete setup of the SMA pins */
ice_ptp_setup_sma_pins_e810t(pf, info);
@@ -1910,11 +1903,16 @@ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
-static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
+static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->n_per_out = N_PER_OUT_E810;
+
+ if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ return;
+
info->n_ext_ts = N_EXT_TS_E810;
}
@@ -1956,7 +1954,7 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
if (ice_is_e810t(&pf->hw))
ice_ptp_setup_pins_e810t(pf, info);
else
- ice_ptp_setup_pins_e810(info);
+ ice_ptp_setup_pins_e810(pf, info);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index f4907a3c2d19..3ba1408c56a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1553,16 +1553,6 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
goto out_put_vf;
}
- /* when max_tx_rate is zero that means no max Tx rate limiting, so only
- * check if max_tx_rate is non-zero
- */
- if (max_tx_rate && min_tx_rate > max_tx_rate) {
- dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
- min_tx_rate, max_tx_rate);
- ret = -EINVAL;
- goto out_put_vf;
- }
-
if (min_tx_rate && ice_is_dcb_active(pf)) {
dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
ret = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 8d8f3eec79ee..fce204693dbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -31,16 +31,17 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
enum {
- ICE_PKT_VLAN = BIT(0),
- ICE_PKT_OUTER_IPV6 = BIT(1),
- ICE_PKT_TUN_GTPC = BIT(2),
- ICE_PKT_TUN_GTPU = BIT(3),
- ICE_PKT_TUN_NVGRE = BIT(4),
- ICE_PKT_TUN_UDP = BIT(5),
- ICE_PKT_INNER_IPV6 = BIT(6),
- ICE_PKT_INNER_TCP = BIT(7),
- ICE_PKT_INNER_UDP = BIT(8),
- ICE_PKT_GTP_NOPAY = BIT(9),
+ ICE_PKT_OUTER_IPV6 = BIT(0),
+ ICE_PKT_TUN_GTPC = BIT(1),
+ ICE_PKT_TUN_GTPU = BIT(2),
+ ICE_PKT_TUN_NVGRE = BIT(3),
+ ICE_PKT_TUN_UDP = BIT(4),
+ ICE_PKT_INNER_IPV6 = BIT(5),
+ ICE_PKT_INNER_TCP = BIT(6),
+ ICE_PKT_INNER_UDP = BIT(7),
+ ICE_PKT_GTP_NOPAY = BIT(8),
+ ICE_PKT_KMALLOC = BIT(9),
+ ICE_PKT_PPPOE = BIT(10),
};
struct ice_dummy_pkt_offsets {
@@ -53,22 +54,42 @@ struct ice_dummy_pkt_profile {
const u8 *pkt;
u32 match;
u16 pkt_len;
+ u16 offsets_len;
};
-#define ICE_DECLARE_PKT_OFFSETS(type) \
- static const struct ice_dummy_pkt_offsets \
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
ice_dummy_##type##_packet_offsets[]
-#define ICE_DECLARE_PKT_TEMPLATE(type) \
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
static const u8 ice_dummy_##type##_packet[]
-#define ICE_PKT_PROFILE(type, m) { \
- .match = (m), \
- .pkt = ice_dummy_##type##_packet, \
- .pkt_len = sizeof(ice_dummy_##type##_packet), \
- .offsets = ice_dummy_##type##_packet_offsets, \
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+ .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
}
+ICE_DECLARE_PKT_OFFSETS(vlan) = {
+ { ICE_VLAN_OFOS, 12 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(vlan) = {
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+};
+
+ICE_DECLARE_PKT_OFFSETS(qinq) = {
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(qinq) = {
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+};
+
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -506,38 +527,6 @@ ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_UDP_ILOS, 38 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (801.1Q), IPv4:UDP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
-
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x11, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
@@ -570,41 +559,6 @@ ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_TCP_IL, 38 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (801.1Q), IPv4:TCP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
-
- 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x06, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -640,46 +594,6 @@ ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + TCP */
-ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_TCP_IL, 58 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
@@ -717,43 +631,6 @@ ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + UDP */
-ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_UDP_ILOS, 58 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
-
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
@@ -1233,6 +1110,154 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_TCP_IL, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_UDP_ILOS, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_TCP_IL, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_UDP_ILOS, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
ICE_PKT_GTP_NOPAY),
@@ -1259,6 +1284,11 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
ICE_PKT_INNER_TCP),
ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
@@ -1271,14 +1301,9 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6),
ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
- ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
- ICE_PKT_VLAN),
ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
- ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
- ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
- ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
ICE_PKT_PROFILE(tcp, 0),
};
@@ -1737,7 +1762,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
sw_buf->res_type =
@@ -2230,8 +2256,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@@ -2666,7 +2690,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -3848,7 +3873,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
- * @hw: pointer to the hardware structure
+ * @pi: pointer to the port_info structure
* @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
@@ -3856,25 +3881,20 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info f_info;
- enum ice_adminq_opc opcode;
- u16 s_rule_size;
+ struct ice_hw *hw = pi->hw;
u16 hw_vsi_id;
int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&f_info, 0, sizeof(f_info));
@@ -3882,54 +3902,80 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = le16_to_cpu(s_rule->index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
+
+ return status;
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: vsi handle to check for in filter list
+ * @rule_exists: indicates if there are any VSIs in the rule list
+ *
+ * checks if the VSI is in the default VSI list, and also indicates
+ * whether the default VSI list contains any rules
+ */
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_sw_recipe *recp_list;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ bool ret = false;
+
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ mutex_lock(rule_lock);
+
+ if (rule_exists && !list_empty(rule_head))
+ *rule_exists = true;
+
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
}
}
-out:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
+ mutex_unlock(rule_lock);
+
+ return ret;
}
/**
@@ -4049,21 +4095,6 @@ int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
- * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
- * @fm_entry: filter entry to inspect
- * @vsi_handle: VSI handle to compare with filter info
- */
-static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
-{
- return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- fm_entry->fltr_info.vsi_handle == vsi_handle) ||
- (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
- fm_entry->vsi_list_info &&
- (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
-}
-
-/**
* ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -4609,6 +4640,9 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, { 0, 2, 4, 6 } },
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
+ { ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_VLAN_EX, { 2, 0 } },
+ { ICE_VLAN_IN, { 2, 0 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
@@ -4629,6 +4663,9 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_VLAN_EX, ICE_VLAN_OF_HW },
+ { ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
/**
@@ -5313,10 +5350,11 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* ice_add_special_words - Add words that are not protocols, such as metadata
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
+ * @dvm_ena: is double VLAN mode enabled
*/
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts)
+ struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
{
u16 mask;
@@ -5335,6 +5373,19 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
}
}
+ if (rinfo->vlan_type != 0 && dvm_ena) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] =
+ ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
+ } else {
+ return -ENOSPC;
+ }
+ }
+
return 0;
}
@@ -5454,7 +5505,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, lkup_exts);
+ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
goto err_free_lkup_exts;
@@ -5555,6 +5606,79 @@ err_free_lkup_exts:
}
/**
+ * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
+ *
+ * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
+ * @num_vlan: number of VLAN tags
+ */
+static struct ice_dummy_pkt_profile *
+ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
+ u32 num_vlan)
+{
+ struct ice_dummy_pkt_profile *profile;
+ struct ice_dummy_pkt_offsets *offsets;
+ u32 buf_len, off, etype_off, i;
+ u8 *pkt;
+
+ if (num_vlan < 1 || num_vlan > 2)
+ return ERR_PTR(-EINVAL);
+
+ off = num_vlan * VLAN_HLEN;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
+ dummy_pkt->offsets_len;
+ offsets = kzalloc(buf_len, GFP_KERNEL);
+ if (!offsets)
+ return ERR_PTR(-ENOMEM);
+
+ offsets[0] = dummy_pkt->offsets[0];
+ if (num_vlan == 2) {
+ offsets[1] = ice_dummy_qinq_packet_offsets[0];
+ offsets[2] = ice_dummy_qinq_packet_offsets[1];
+ } else if (num_vlan == 1) {
+ offsets[1] = ice_dummy_vlan_packet_offsets[0];
+ }
+
+ for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
+ offsets[i + num_vlan].offset =
+ dummy_pkt->offsets[i].offset + off;
+ }
+ offsets[i + num_vlan] = dummy_pkt->offsets[i];
+
+ etype_off = dummy_pkt->offsets[1].offset;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
+ dummy_pkt->pkt_len;
+ pkt = kzalloc(buf_len, GFP_KERNEL);
+ if (!pkt) {
+ kfree(offsets);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(pkt, dummy_pkt->pkt, etype_off);
+ memcpy(pkt + etype_off,
+ num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
+ off);
+ memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
+ dummy_pkt->pkt_len - etype_off);
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ kfree(offsets);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ profile->offsets = offsets;
+ profile->pkt = pkt;
+ profile->pkt_len = buf_len;
+ profile->match |= ICE_PKT_KMALLOC;
+
+ return profile;
+}
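
To make the splice above concrete with numbers taken from the plain tcp dummy packet (MAC 0, ETYPE 12, IPv4 14, TCP 34): a single-VLAN request sets off = 4 and yields MAC 0, VLAN 12, ETYPE 16, IPv4 18, TCP 38 — the same layout as the removed static vlan_tcp template — while a QinQ request sets off = 8 and yields MAC 0, VLAN_EX 12, VLAN_IN 16, ETYPE 20, IPv4 22, TCP 42.
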
+
+/**
* ice_find_dummy_packet - find dummy packet
*
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5569,7 +5693,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
enum ice_sw_tunnel_type tun_type)
{
const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
- u32 match = 0;
+ u32 match = 0, vlan_count = 0;
u16 i;
switch (tun_type) {
@@ -5597,8 +5721,11 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
match |= ICE_PKT_OUTER_IPV6;
- else if (lkups[i].type == ICE_VLAN_OFOS)
- match |= ICE_PKT_VLAN;
+ else if (lkups[i].type == ICE_VLAN_OFOS ||
+ lkups[i].type == ICE_VLAN_EX)
+ vlan_count++;
+ else if (lkups[i].type == ICE_VLAN_IN)
+ vlan_count++;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
@@ -5615,11 +5742,20 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_GTP_NO_PAY)
match |= ICE_PKT_GTP_NOPAY;
+ else if (lkups[i].type == ICE_PPPOE) {
+ match |= ICE_PKT_PPPOE;
+ if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
+ htons(PPP_IPV6))
+ match |= ICE_PKT_OUTER_IPV6;
+ }
}
while (ret->match && (match & ret->match) != ret->match)
ret++;
+ if (vlan_count != 0)
+ ret = ice_dummy_packet_add_vlan(ret, vlan_count);
+
return ret;
}
@@ -5678,6 +5814,8 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
+ case ICE_VLAN_EX:
+ case ICE_VLAN_IN:
len = sizeof(struct ice_vlan_hdr);
break;
case ICE_IPV4_OFOS:
@@ -5707,6 +5845,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
+ case ICE_PPPOE:
+ len = sizeof(struct ice_pppoe_hdr);
+ break;
default:
return -EINVAL;
}
@@ -5783,6 +5924,36 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
/**
+ * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @vlan_type: VLAN tag type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static int
+ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 i;
+
+ /* Find VLAN header and insert VLAN TPID */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_VLAN_OFOS ||
+ offsets[i].type == ICE_VLAN_EX) {
+ struct ice_vlan_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_vlan_hdr *)&pkt[offset];
+ hdr->type = cpu_to_be16(vlan_type);
+
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
* ice_find_adv_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5817,6 +5988,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
rinfo->tun_type == list_itr->rule_info.tun_type &&
+ rinfo->vlan_type == list_itr->rule_info.vlan_type &&
lkups_matched)
return list_itr;
}
@@ -5993,16 +6165,22 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* locate a dummy packet */
profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return -EIO;
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ status = -EIO;
+ goto free_pkt_profile;
+ }
vsi_handle = rinfo->sw_act.vsi_handle;
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return -EINVAL;
+ if (!ice_is_vsi_valid(hw, vsi_handle)) {
+ status = -EINVAL;
+ goto free_pkt_profile;
+ }
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -6012,7 +6190,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
if (status)
- return status;
+ goto free_pkt_profile;
m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
if (m_entry) {
/* we have to add VSI to VSI_LIST and increment vsi_count.
@@ -6031,12 +6209,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
}
- return status;
+ goto free_pkt_profile;
}
rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ if (!s_rule) {
+ status = -ENOMEM;
+ goto free_pkt_profile;
+ }
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -6105,6 +6285,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto err_ice_add_adv_rule;
}
+ if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
+ status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
NULL);
@@ -6150,6 +6338,13 @@ err_ice_add_adv_rule:
kfree(s_rule);
+free_pkt_profile:
+ if (profile->match & ICE_PKT_KMALLOC) {
+ kfree(profile->offsets);
+ kfree(profile->pkt);
+ kfree(profile);
+ }
+
return status;
}
@@ -6342,7 +6537,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, &lkup_exts);
+ status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
if (status)
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index eb641e5512d2..68d8e8a6a189 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -192,6 +192,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
+ u16 vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
@@ -358,7 +359,13 @@ int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
/* Promisc/defport setup for VSIs */
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction);
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
+
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index b803f2ab3cc7..a298862857a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -50,6 +50,15 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_VLAN)
lkups_cnt++;
+ /* is CVLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ lkups_cnt++;
+
+ /* are PPPoE options specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO))
+ lkups_cnt++;
+
/* are IPv[4|6] fields specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
@@ -134,6 +143,18 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
}
}
+static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
+{
+ switch (vlan_tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ case ETH_P_QINQ1:
+ return vlan_tpid;
+ default:
+ return 0;
+ }
+}
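For illustration only (TPID constants from include/uapi/linux/if_ether.h): the three standard TPIDs pass through unchanged, while anything else collapses to 0 so no TPID rewrite is applied to the dummy packet later on.

	ice_check_supported_vlan_tpid(ETH_P_8021Q);	/* 0x8100 -> 0x8100 */
	ice_check_supported_vlan_tpid(ETH_P_8021AD);	/* 0x88A8 -> 0x88A8 */
	ice_check_supported_vlan_tpid(ETH_P_QINQ1);	/* 0x9100 -> 0x9100 */
	ice_check_supported_vlan_tpid(0x1234);		/* unsupported -> 0 */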
+
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
struct ice_adv_lkup_elem *list)
@@ -269,8 +290,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
+ u16 vlan_tpid = 0;
int i = 0;
+ rule_info->vlan_type = vlan_tpid;
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
@@ -311,12 +335,48 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* copy VLAN info */
if (flags & ICE_TC_FLWR_FIELD_VLAN) {
- list[i].type = ICE_VLAN_OFOS;
+ vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+ rule_info->vlan_type =
+ ice_check_supported_vlan_tpid(vlan_tpid);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ list[i].type = ICE_VLAN_EX;
+ else
+ list[i].type = ICE_VLAN_OFOS;
list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].type = ICE_VLAN_IN;
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO)) {
+ struct ice_pppoe_hdr *vals, *masks;
+
+ vals = &list[i].h_u.pppoe_hdr;
+ masks = &list[i].m_u.pppoe_hdr;
+
+ list[i].type = ICE_PPPOE;
+
+ if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
+ vals->session_id = headers->pppoe_hdr.session_id;
+ masks->session_id = cpu_to_be16(0xFFFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
+ vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
+ masks->ppp_prot_id = cpu_to_be16(0xFFFF);
+ }
+
+ i++;
+ }
+
/* copy L3 (IPv[4|6]: src, dest) address */
if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
ICE_TC_FLWR_FIELD_SRC_IPV4)) {
@@ -661,6 +721,31 @@ exit:
}
/**
+ * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: Pointer to outer header fields
+ * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
+ */
+static u16
+ice_tc_set_pppoe(struct flow_match_pppoe *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match->mask->session_id) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
+ headers->pppoe_hdr.session_id = match->key->session_id;
+ }
+
+ if (match->mask->ppp_proto) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
+ headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
+ }
+
+ return be16_to_cpu(match->key->type);
+}
+
+/**
* ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
* @match: Pointer to flow match structure
* @fltr: Pointer to filter structure
@@ -945,6 +1030,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
@@ -954,7 +1040,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
@@ -1060,6 +1147,50 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
if (match.mask->vlan_priority)
headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_tpid)
+ headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
+ return -EINVAL;
+ }
+
+ flow_rule_match_cvlan(rule, &match);
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Bad CVLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
+ struct flow_match_pppoe match;
+
+ flow_rule_match_pppoe(rule, &match);
+ n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
+
+ /* If ethertype equals ETH_P_PPP_SES, n_proto might be
+ * overwritten by encapsulated protocol (ppp_proto field) or set
+ * to 0. To correct this, flow_match_pppoe provides the type
+ * field, which contains the actual ethertype (ETH_P_PPP_SES).
+ */
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -1194,7 +1325,7 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
vsi->netdev->dev_addr);
- memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
}
/* validate specified dest MAC address, make sure either it belongs to
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index e25e958f4396..91cd3d3778c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -23,6 +23,9 @@
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
+#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
+#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -40,6 +43,12 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_tpid;
+};
+
+struct ice_tc_pppoe_hdr {
+ __be16 session_id;
+ __be16 ppp_proto;
};
struct ice_tc_l2_hdr {
@@ -81,6 +90,8 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_l2_hdr l2_key;
struct ice_tc_l2_hdr l2_mask;
struct ice_tc_vlan_hdr vlan_hdr;
+ struct ice_tc_vlan_hdr cvlan_hdr;
+ struct ice_tc_pppoe_hdr pppoe_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f2a518a1fd94..861b64322959 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -693,10 +693,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 7adf9ddf129e..8fd7c3e37f5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -271,13 +271,14 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
+ * are in unicast promiscuous mode
* @pf: PF structure for accessing VF(s)
*
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * Return false if no VF(s) are in unicast promiscuous mode,
* else return true
*/
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
bool is_vf_promisc = false;
struct ice_vf *vf;
@@ -286,8 +287,7 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
rcu_read_lock();
ice_for_each_vf_rcu(pf, bkt, vf) {
/* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
is_vf_promisc = true;
break;
}
@@ -298,6 +298,73 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
}
/**
+ * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ * @ucast_m: promiscuous mask to apply to unicast
+ * @mcast_m: promiscuous mask to apply to multicast
+ *
+ * Decide which masks should be used for the unicast and multicast
+ * filters, based on the presence of VLANs
+ */
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m)
+{
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
+ *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ } else {
+ *mcast_m = ICE_MCAST_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_PROMISC_BITS;
+ }
+}
+
+/**
+ * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ *
+ * Clear all promiscuous/allmulticast filters for a VF
+ */
+static int
+ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 ucast_m, mcast_m;
+ int ret = 0;
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ } else {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+ }
+
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
+ }
+ }
+
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
+ }
+ }
+ return ret;
+}
+
+/**
* ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
* @vf: the VF to configure
* @vsi: the VF's VSI
@@ -487,7 +554,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
struct ice_vsi *vsi;
struct device *dev;
struct ice_hw *hw;
- u8 promisc_m;
int err = 0;
bool rsd;
@@ -554,16 +620,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
+ ice_vf_clear_all_promisc_modes(vf, vsi);
ice_eswitch_del_vf_mac_rule(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 1b4380d6d949..52bd9a3816bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -214,7 +214,10 @@ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m);
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int
@@ -260,7 +263,7 @@ static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
}
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 24188ec594d5..094e3c97a1ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -360,6 +360,54 @@ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
}
/**
+ * ice_vc_get_vlan_caps
+ * @hw: pointer to the hw
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VSI
+ * @driver_caps: current driver caps
+ *
+ * Return: 0 if no VLAN caps are supported, otherwise the VLAN caps value
+ */
+static u32
+ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
+ u32 driver_caps)
+{
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ /* In switchdev setting VLAN from VF isn't supported */
+ return 0;
+
+ if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ /* VLAN offloads based on current device configuration */
+ return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+ } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
+ * these two conditions, which amounts to guest VLAN filtering
+ * and offloads being based on the inner VLAN or the
+ * inner/single VLAN respectively and don't allow VF to
+ * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
+ */
+ if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (!ice_is_dvm_ena(hw) &&
+ !ice_vf_is_port_vlan_ena(vf)) {
+ /* configure backward compatible support for VFs that
+ * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+ * configured in SVM, and no port VLAN is configured
+ */
+ ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (ice_is_dvm_ena(hw)) {
+ /* configure software offloaded VLAN support when DVM
+ * is enabled, but no port VLAN is enabled
+ */
+ ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+ }
+ }
+
+ return 0;
+}
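A compact summary of the branches above (derived from the code; SVM/DVM stand for single/double VLAN mode):

	/* switchdev mode                 -> 0
	 * driver_caps has VLAN_V2       -> VIRTCHNL_VF_OFFLOAD_VLAN_V2
	 * VLAN + DVM + port VLAN        -> VIRTCHNL_VF_OFFLOAD_VLAN
	 * VLAN + SVM + no port VLAN     -> VIRTCHNL_VF_OFFLOAD_VLAN
	 *                                  (SVM legacy VLAN mode configured)
	 * VLAN + DVM + no port VLAN     -> 0 (DVM legacy VLAN mode configured)
	 * VLAN + SVM + port VLAN        -> 0
	 */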
+
+/**
* ice_vc_get_vf_res_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -402,33 +450,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
goto err;
}
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
- /* VLAN offloads based on current device configuration */
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
- } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
- /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
- * these two conditions, which amounts to guest VLAN filtering
- * and offloads being based on the inner VLAN or the
- * inner/single VLAN respectively and don't allow VF to
- * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
- */
- if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
- } else if (!ice_is_dvm_ena(hw) &&
- !ice_vf_is_port_vlan_ena(vf)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
- /* configure backward compatible support for VFs that
- * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
- * configured in SVM, and no port VLAN is configured
- */
- ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
- } else if (ice_is_dvm_ena(hw)) {
- /* configure software offloaded VLAN support when DVM
- * is enabled, but no port VLAN is enabled
- */
- ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
- }
- }
+ vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
+ vf->driver_caps);
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
@@ -976,6 +999,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ u8 mcast_m, ucast_m;
struct device *dev;
int ret = 0;
@@ -1022,39 +1046,33 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = alluni || allmulti;
+ if (alluni) {
+ /* in this case we're turning on promiscuous mode */
+ ret = ice_set_dflt_vsi(vsi);
+ } else {
+ /* in this case we're turning off promiscuous mode */
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ }
- if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
- /* only attempt to set the default forwarding VSI if
- * it's not currently set
- */
- ret = ice_set_dflt_vsi(pf->first_sw, vsi);
- else if (!set_dflt_vsi &&
- ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- /* only attempt to free the default forwarding VSI if we
- * are the owner
- */
- ret = ice_clear_dflt_vsi(pf->first_sw);
+ /* in this case we're turning on/off only
+ * allmulticast
+ */
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
if (ret) {
- dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
- set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+ dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
+ vf->vf_id, ret);
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto error_param;
}
} else {
- u8 mcast_m, ucast_m;
-
- if (ice_vf_is_port_vlan_ena(vf) ||
- ice_vsi_has_non_zero_vlans(vsi)) {
- mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
- ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
- } else {
- mcast_m = ICE_MCAST_PROMISC_BITS;
- ucast_m = ICE_UCAST_PROMISC_BITS;
- }
-
if (alluni)
ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
else
@@ -1079,6 +1097,9 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_states))
dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, mcast_err);
}
if (!ucast_err) {
@@ -1091,6 +1112,9 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_states))
dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, ucast_err);
}
error_param:
@@ -3529,42 +3553,6 @@ ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
}
-static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't enable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't disable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
static int
ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
{
@@ -3591,10 +3579,10 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.config_rss_lut = ice_vc_config_rss_lut,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
- .add_vlan_msg = ice_vc_repr_add_vlan,
- .remove_vlan_msg = ice_vc_repr_del_vlan,
- .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
- .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1b618de592b7..bcda2e004807 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -199,7 +199,6 @@ static bool ice_is_dvm_supported(struct ice_hw *hw)
#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
-#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
{
/* Update recipe ICE_SW_LKUP_VLAN to filter based on the
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index cbe92fd23a70..8d6e44ee1895 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2207,7 +2207,7 @@ out:
* igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ca5429774994..fa028928482f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1033,9 +1033,6 @@
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
-
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1277c5c7d099..205d577bdbba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -854,7 +854,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 9cb49980ec2d..eb9f6da9208a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -116,7 +116,6 @@
#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c5f04c40284b..d8b836a85cc3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1945,7 +1945,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
* However, when we do so, no frame from queue 2 and 3 are
* transmitted. It seems the MAX_TPKT_SIZE should not be great
* or _equal_ to the buffer size programmed in TXPBS. For this
- * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64.
+ * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
*/
val = (4096 - 1) / 64;
wr32(E1000_I210_DTXMXPKTSZ, val);
@@ -6260,74 +6260,108 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *tx_ring,
struct xdp_frame *xdpf)
{
- union e1000_adv_tx_desc *tx_desc;
- u32 len, cmd_type, olinfo_status;
- struct igb_tx_buffer *tx_buffer;
- dma_addr_t dma;
- u16 i;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, i, index = tx_ring->next_to_use;
+ struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
+ struct igb_tx_buffer *tx_buffer = tx_head;
+ union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
+ u32 len = xdpf->len, cmd_type, olinfo_status;
+ void *data = xdpf->data;
- len = xdpf->len;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- if (unlikely(!igb_desc_unused(tx_ring)))
- return IGB_XDP_CONSUMED;
-
- dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ if (igb_maybe_stop_tx(tx_ring, count + 3))
return IGB_XDP_CONSUMED;
+ i = 0;
/* record the location of the first descriptor for this packet */
- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->type = IGB_TYPE_XDP;
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- i = tx_ring->next_to_use;
- tx_desc = IGB_TX_DESC(tx_ring, i);
+ olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->type = IGB_TYPE_XDP;
- tx_buffer->xdpf = xdpf;
+ for (;;) {
+ dma_addr_t dma;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto unmap;
- /* put descriptor type bits */
- cmd_type = E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_DEXT |
- E1000_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IGB_TXD_DCMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
- olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring */
- if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
- olinfo_status |= tx_ring->reg_idx << 4;
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS | len;
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ tx_buffer->protocol = 0;
- netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+ if (++index == tx_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ tx_desc = IGB_TX_DESC(tx_ring, index);
+ tx_desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
/* set the timestamp */
- tx_buffer->time_stamp = jiffies;
+ tx_head->time_stamp = jiffies;
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
/* set next_to_watch value indicating a packet is present */
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- tx_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ tx_ring->next_to_use = index;
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
- writel(i, tx_ring->tail);
+ writel(index, tx_ring->tail);
return IGB_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == tx_head)
+ break;
+
+ if (!index)
+ index += tx_ring->count;
+ index--;
+ }
+
+ return IGB_XDP_CONSUMED;
}
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
@@ -8818,6 +8852,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
@@ -9522,7 +9557,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
igb_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 02fec948ce64..15e57460e19e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -190,7 +190,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
}
/* PTP clock operations */
-static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
@@ -199,15 +199,14 @@ static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
u64 rate;
u32 incvalue;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate = ppb;
- rate <<= 14;
- rate = div_u64(rate, 1953125);
- incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+ incvalue = INCVALUE_82576;
+ rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm,
+ 1000000ULL << 16);
if (neg_adj)
incvalue -= rate;
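Why the new formula matches the old one (a sketch of the arithmetic, assuming INCVALUE_82576 keeps the replaced 16 << IGB_82576_TSYNC_SHIFT value): the removed code computed rate = (ppb << 14) / 1953125, and since 1953125 = 10^9 / 512 that is ppb * 2^23 / 10^9. The new code computes rate = INCVALUE_82576 * scaled_ppm / (10^6 * 2^16); with scaled_ppm = ppb * 2^16 / 1000 this reduces to INCVALUE_82576 * ppb / 10^9, the same adjustment (up to integer rounding) whenever INCVALUE_82576 equals 2^23.

	/* sketch: both expressions yield the same rate, up to rounding, for
	 * incvalue == 1ULL << 23 and scaled_ppm == (ppb << 16) / 1000
	 */
	u64 old_rate = div_u64((u64)ppb << 14, 1953125);
	u64 new_rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm,
					   1000000ULL << 16);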
@@ -1347,7 +1346,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.max_adj = 999999881;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjfine = igb_ptp_adjfine_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 975eb47ee04d..57d39ee00b58 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -227,7 +227,7 @@ struct igbvf_adapter {
/* The VF counters don't clear on read so we have to get a base
* count on driver start up and always subtract that base on
- * on the first update, thus the flag..
+ * the first update, thus the flag..
*/
struct e1000_vf_stats stats;
u64 zero_base;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 43ced78c3a2e..f4e91db89fe5 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2537,7 +2537,7 @@ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
igbvf_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 360644f33d5f..88680e3d613d 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -89,8 +89,6 @@ struct igc_mac_info {
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
- u8 forced_speed_duplex;
-
bool asf_firmware_present;
bool arc_subsystem_valid;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 67b8ffd21d8a..a5c4b19d71a2 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -193,7 +193,7 @@ s32 igc_force_mac_fc(struct igc_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index a5ebee7df4a8..ebff0e04045d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5813,9 +5813,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
return false;
for (n = 0; n < qopt->num_entries; n++) {
- const struct tc_taprio_sched_entry *e;
+ const struct tc_taprio_sched_entry *e, *prev;
int i;
+ prev = n ? &qopt->entries[n - 1] : NULL;
e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
@@ -5828,7 +5829,12 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->gate_mask & BIT(i))
queue_uses[i]++;
- if (queue_uses[i] > 1)
+ /* There is a limitation: a queue cannot be opened more
+ * than once per cycle unless its gate stays open across
+ * consecutive entries. Check for that here.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
return false;
}
}
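To make the new check concrete (illustrative gate masks only): a schedule whose entries use gate_mask 0x1, 0x1, 0x2 keeps queue 0 open across the two consecutive entries that use it and is accepted; a schedule using 0x1, 0x2, 0x1 reopens queue 0 after it was closed, so queue_uses[0] reaches 2 while the previous entry lacks BIT(0) and validate_schedule() returns false.

	/* accepted: 0x1, 0x1, 0x2  (queue 0 stays open, then queue 1 opens)
	 * rejected: 0x1, 0x2, 0x1  (queue 0 closed and reopened in one cycle)
	 */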
@@ -5872,6 +5878,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
u32 start_time = 0, end_time = 0;
size_t n;
@@ -5887,9 +5894,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
- /* FIXME: be a little smarter about cases when the gate for a
- * queue stays open for more than one entry.
- */
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
int i;
@@ -5902,8 +5906,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!(e->gate_mask & BIT(i)))
continue;
- ring->start_time = start_time;
+ /* Check whether a queue stays open for more than one
+ * entry. If so, keep the start and advance the end
+ * time.
+ */
+ if (!queue_configured[i])
+ ring->start_time = start_time;
ring->end_time = end_time;
+
+ queue_configured[i] = true;
}
start_time += e->interval;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 653e9f1e35b5..8dbb9f903ca7 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -15,7 +15,6 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define IGC_PTM_STAT_SLEEP 2
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 026c3b65fc37..c0d8214148d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -59,9 +59,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* MSI-X Table Register Descriptions */
-#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
-
/* RSS registers */
#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index c8d1e815ec6b..98bd3267b99b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -576,7 +576,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
* Writes a value to the specified offset in the VLAN filter table.
*
* hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filer table to write
+ * offset - Offset in VLAN filter table to write
* value - Value to write into VLAN filter table
*****************************************************************************/
void
@@ -588,7 +588,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
}
/******************************************************************************
- * Clears the VLAN filer table
+ * Clears the VLAN filter table
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index affdefcca7e3..45be9a1ab6af 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1187,7 +1187,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
iph = ip_hdr(skb);
iph->tot_len = 0;
@@ -1704,7 +1704,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
netdev->stats.tx_window_errors = 0;
}
-#define IXGB_MAX_INTR 10
/**
* ixgb_intr - Interrupt Handler
* @irq: interrupt number
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index f0cadd532c53..d40f96250691 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -141,8 +141,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define MAX_RDTR 0xFFFF
#define MIN_RDTR 0
-#define XSUMRX_DEFAULT OPTION_ENABLED
-
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8813b4dd6872..5369a97ff5ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -167,12 +167,46 @@ enum ixgbe_tx_flags {
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
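A short walk-through of the wrap handling (made-up values): the PVF counters are only 32 (or 36) bits wide and are not clear-on-read, so each macro keeps a 64-bit software counter and detects a wrap by comparing the fresh reading with the previous one.

	/* last_counter = 0xFFFFFFF0, 64-bit counter = 0x1FFFFFFF0 so far.
	 * A new reading of 0x00000010 is smaller than last_counter, so one
	 * wrap (0x100000000) is added, the stale low 32 bits are masked off
	 * and the new reading is spliced in:
	 *   (0x1FFFFFFF0 + 0x100000000) & 0xFFFFFFFF00000000 | 0x00000010
	 *   = 0x200000010
	 */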
+
+struct vf_stats {
+ u64 gprc;
+ u64 gorc;
+ u64 gptc;
+ u64 gotc;
+ u64 mprc;
+};
+
struct vf_data_storage {
struct pci_dev *vfdev;
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
bool clear_to_send;
+ struct vf_stats vfstats;
+ struct vf_stats last_vfstats;
+ struct vf_stats saved_rst_vfstats;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 95c92fe890a1..100388968e4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -879,7 +879,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* ixgbe_clear_vfta_82598 - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4c26c4b92f07..38c4609bd429 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3237,7 +3237,7 @@ vfta_update:
* ixgbe_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 72e6ebffea33..e85f7d2e8810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -8,12 +8,10 @@
#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
-#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 628d0eb0599f..04f453eabef6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -18,8 +18,6 @@
#include "ixgbe_phy.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBE_STATS};
struct ixgbe_stats {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 55f91c9ff047..d1e430b8c8aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5161,7 +5161,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
}
/**
- * ixgbe_lpbthresh - calculate low water mark for for flow control
+ * ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
@@ -5549,6 +5549,47 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
return ret;
}
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ adapter->vfinfo[i].last_vfstats.gprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+ adapter->vfinfo[i].vfstats.gprc;
+ adapter->vfinfo[i].vfstats.gprc = 0;
+ adapter->vfinfo[i].last_vfstats.gptc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+ adapter->vfinfo[i].vfstats.gptc;
+ adapter->vfinfo[i].vfstats.gptc = 0;
+ adapter->vfinfo[i].last_vfstats.gorc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+ adapter->vfinfo[i].vfstats.gorc;
+ adapter->vfinfo[i].vfstats.gorc = 0;
+ adapter->vfinfo[i].last_vfstats.gotc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+ adapter->vfinfo[i].vfstats.gotc;
+ adapter->vfinfo[i].vfstats.gotc = 0;
+ adapter->vfinfo[i].last_vfstats.mprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+ adapter->vfinfo[i].vfstats.mprc;
+ adapter->vfinfo[i].vfstats.mprc = 0;
+ }
+}
+
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5684,6 +5725,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->service_timer, jiffies);
+ ixgbe_clear_vf_stats_counters(adapter);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -7274,6 +7316,32 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
netdev->stats.rx_length_errors = hwstats->rlec;
netdev->stats.rx_crc_errors = hwstats->crcerrs;
netdev->stats.rx_missed_errors = total_mpc;
+
+ /* VF Stats Collection - skip while resetting because these
+ * are not clear on read and otherwise you'll sometimes get
+ * crazy values.
+ */
+ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ for (i = 0; i < adapter->num_vfs; i++) {
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),
+ adapter->vfinfo[i].last_vfstats.gprc,
+ adapter->vfinfo[i].vfstats.gprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),
+ adapter->vfinfo[i].last_vfstats.gptc,
+ adapter->vfinfo[i].vfstats.gptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),
+ IXGBE_PVFGORC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gorc,
+ adapter->vfinfo[i].vfstats.gorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),
+ IXGBE_PVFGOTC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gotc,
+ adapter->vfinfo[i].vfstats.gotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),
+ adapter->vfinfo[i].last_vfstats.mprc,
+ adapter->vfinfo[i].vfstats.mprc);
+ }
+ }
}
/**
@@ -9025,6 +9093,23 @@ static void ixgbe_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}
+static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf < 0 || vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc;
+ vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc;
+ vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc;
+ vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc;
+ vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc;
+
+ return 0;
+}
+
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
@@ -10341,6 +10426,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 336426a67ac1..9f06896a049b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -113,12 +113,16 @@
* the sign bit. This register enables software to calculate frequency
* adjustments and apply them directly to the clock rate.
*
- * The math for converting ppb into TIMINCA values is fairly straightforward.
- * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL
+ * The math for converting scaled_ppm into TIMINCA values is fairly
+ * straightforward.
*
- * This assumes that ppb is never high enough to create a value bigger than
- * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this
- * value is also simple.
+ * TIMINCA value = ( Base_Frequency * scaled_ppm ) / ( 1000000ULL << 16 )
+ *
+ * To avoid overflow, we simply use mul_u64_u64_div_u64.
+ *
+ * This assumes that scaled_ppm is never high enough to create a value bigger
+ * than TIMINCA's 31 bits can store. This is ensured by the stack. The maximum
+ * adjustment, expressed in parts per billion, is also simple to calculate:
* Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL
*
* For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is
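A worked example of the conversion above (a sketch; base_frequency stands in for the per-MAC base increment value): scaled_ppm carries a 16-bit binary fraction, so +10 ppm arrives from the PTP core as 10 << 16 = 655360 and the adjustment becomes

	/* diff == base_frequency * 10 / 1000000, i.e. a 10 ppm increase,
	 * which the adjfine callbacks below add to (or, for a negative
	 * scaled_ppm, subtract from) the increment register value.
	 */
	u64 diff = mul_u64_u64_div_u64(base_frequency, 655360ULL,
				       1000000ULL << 16);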
@@ -138,7 +142,6 @@
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
-#define MAX_TIMADJ 0x7FFFFFFF
/**
* ixgbe_ptp_setup_sdp_X540
@@ -434,45 +437,45 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_adjfreq_82599
+ * ixgbe_ptp_adjfine_82599
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated scaled_ppm from the base frequency.
*
- * adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- u64 freq, incval;
- u32 diff;
+ u64 incval, diff;
int neg_adj = 0;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
smp_mb();
incval = READ_ONCE(adapter->base_incval);
- freq = incval;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ diff = mul_u64_u64_div_u64(incval, scaled_ppm,
+ 1000000ULL << 16);
incval = neg_adj ? (incval - diff) : (incval + diff);
switch (hw->mac.type) {
case ixgbe_mac_X540:
if (incval > 0xFFFFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval);
break;
case ixgbe_mac_82599EB:
if (incval > 0x00FFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
@@ -485,32 +488,35 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
/**
- * ixgbe_ptp_adjfreq_X550
+ * ixgbe_ptp_adjfine_X550
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
- * adjust the frequency of the SYSTIME registers by the indicated ppb from base
- * frequency
+ * Adjust the frequency of the SYSTIME registers by the indicated scaled_ppm
+ * from base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
int neg_adj = 0;
- u64 rate = IXGBE_X550_BASE_PERIOD;
+ u64 rate;
u32 inca;
- if (ppb < 0) {
+ if (scaled_ppm < 0) {
neg_adj = 1;
- ppb = -ppb;
+ scaled_ppm = -scaled_ppm;
}
- rate *= ppb;
- rate = div_u64(rate, 1000000000ULL);
+
+ rate = mul_u64_u64_div_u64(IXGBE_X550_BASE_PERIOD, scaled_ppm,
+ 1000000ULL << 16);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
inca = rate & INCVALUE_MASK;
if (neg_adj)
@@ -1356,7 +1362,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1373,7 +1379,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1389,7 +1395,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
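Editor's note: scaled_ppm carries a 16-bit binary fraction (65536 corresponds to exactly 1 ppm), which is why the converted adjfine callbacks divide by 1000000 << 16 instead of the old 1e9. A minimal sketch of that arithmetic in isolation; the helper name and example numbers are chosen for illustration only.

/* Hedged illustration of the scaled_ppm -> increment-delta math used above.
 * mul_u64_u64_div_u64() keeps the 128-bit intermediate product, so the
 * multiply cannot overflow before the divide.
 */
#include <linux/math64.h>

static u64 incval_delta(u64 base_incval, long scaled_ppm)
{
	if (scaled_ppm < 0)
		scaled_ppm = -scaled_ppm;

	/* e.g. base_incval = 0xCCCCCC00, scaled_ppm = 65536 (exactly 1 ppm)
	 * -> delta = base_incval / 1000000 ≈ 0xD6B, i.e. ~1 ppm of the rate
	 */
	return mul_u64_u64_div_u64(base_incval, scaled_ppm, 1000000ULL << 16);
}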
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index a1e69c734863..29cc60988071 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -77,7 +77,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* limit trafffic classes based on VFs enabled */
+ /* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6da9880d766a..7f7ea468ffa9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2533,6 +2533,13 @@ enum {
#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
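Editor's note: the GORC/GOTC octet counters added above are split across LSB/MSB register pairs. A hedged sketch of combining such a pair into one value, assuming the usual ixgbe layout of 32 low bits plus 4 high bits; the exact MSB width of the PVF registers is an assumption here, not taken from the datasheet.

/* Hedged sketch: read a byte counter split across the IXGBE_PVFGORC_LSB/MSB
 * pair defined above and combine it into a 36-bit total.
 */
static u64 read_vf_octets(struct ixgbe_hw *hw, int vf)
{
	u64 lsb = IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(vf));
	u64 msb = IXGBE_READ_REG(hw, IXGBE_PVFGORC_MSB(vf)) & 0xF;

	return (msb << 32) | lsb;	/* assumed 36-bit octet count */
}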
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e4b50c7781ff..35c2b9b8bd19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1737,7 +1737,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for native SFP support.
+ * Configure the integrated PHY for native SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
@@ -1786,7 +1786,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for SFP support.
+ * Configure the integrated PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 3b41f83c8dff..fed46872af2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -17,8 +17,6 @@
#include "ixgbevf.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBEVF_STATS};
struct ixgbe_stats {
@@ -130,8 +128,6 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 55b87bc3a938..2f12fbe229c1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4787,7 +4787,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
rtnl_unlock();
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 68fc32e36e88..1641d00d8ed3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -964,7 +964,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if we we didn't get an ACK there must have been
+ /* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 57eff4e9e6de..b6be0552a6c1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -775,7 +775,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
int tx_index;
struct tx_desc *desc;
int ret;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 384f5a16753d..0caa2df87c04 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2664,8 +2664,8 @@ err_drop_frame:
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2727,7 +2727,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
if ((txq->count + tso_count_descs(skb)) >= txq->size)
return 0;
- if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
pr_info("*** Is this even possible?\n");
return 0;
}
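Editor's note: both hunks above replace the open-coded header-length sum with skb_tcp_all_headers(). The helper is expected to be a straight wrapper for the same expression (its definition lives in include/linux/tcp.h), so the conversions are mechanical; a sketch of the assumed equivalence:

/* Assumed definition of the helper the two hunks above switch to;
 * behaviour is identical to the removed open-coded sum.
 */
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}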
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 7f4a4ca9af78..40203560b291 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o
+ rvu_sdp.o rvu_npc_hash.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 25491edc35ce..c8724bfa86b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -498,6 +498,32 @@ static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = cgx->mac_ops->fifo_len;
+ num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC0 gets half of the FIFO, rest 1/4th */
+ if (lmac_id == 0)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
@@ -847,6 +873,11 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
@@ -899,6 +930,7 @@ int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
return 0;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
if (rx_pause) {
cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
@@ -910,12 +942,13 @@ int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
CGXX_SMUX_CBFC_CTL_DRP_EN);
}
- if (tx_pause)
+ if (tx_pause) {
cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
- else
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
-
- cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ }
cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
@@ -1005,9 +1038,9 @@ int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
dev = &cgx->pdev->dev;
- dev_err(dev, "cgx port %d:%d cmd timeout\n",
- cgx->cgx_id, lmac->lmac_id);
- err = -EIO;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
goto unlock;
}
@@ -1433,11 +1466,19 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
u64 req = 0;
u64 resp;
- if (enable)
+ if (enable) {
req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
- else
- req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ /* On CN10K firmware offloads link bring up/down operations to ECP.
+ * On Octeontx2 link operations are handled by firmware itself,
+ * which can cause mbox errors, so configure the maximum time the
+ * firmware polls for link as 1000 ms.
+ */
+ if (!is_dev_rpm(cgx))
+ req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+ } else {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ }
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
@@ -1689,6 +1730,7 @@ static struct mac_ops cgx_mac_ops = {
.tx_stats_cnt = 18,
.get_nr_lmacs = cgx_get_nr_lmacs,
.get_lmac_type = cgx_get_lmac_type,
+ .lmac_fifo_len = cgx_get_lmac_fifo_len,
.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
.mac_get_rx_stats = cgx_get_rx_stats,
.mac_get_tx_stats = cgx_get_tx_stats,
@@ -1743,6 +1785,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
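Editor's note: a worked example of the per-LMAC FIFO split introduced by cgx_get_lmac_fifo_len(), using a hypothetical 64 KB MAC FIFO (the real fifo_len comes from mac_ops and is hardware dependent):

/* Worked example (hypothetical 64 KB FIFO, values illustrative only):
 *   1 LMAC  -> LMAC0 = 65536 bytes
 *   2 LMACs -> LMAC0 = LMAC1 = 32768 bytes
 *   3 LMACs -> LMAC0 = 32768 bytes, LMAC1 = LMAC2 = 16384 bytes
 *   4 LMACs -> each LMAC = 16384 bytes
 * The rvu_nix.c changes further down use this per-LMAC figure to size TX
 * link credits instead of the earlier fifo_len / lmac_count split.
 */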
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index bd2f33a26eee..0b06788b8d80 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -92,7 +92,7 @@
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
-#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define CGX_CMD_TIMEOUT 5000 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_LMAC_FWI 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index f72ec0e2506f..d4a27c882a5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -261,4 +261,6 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+/* LINK_BRING_UP command timeout */
+#define LINKCFG_TIMEOUT GENMASK_ULL(21, 8)
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index f30581bf0688..52b6016789fa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -80,6 +80,7 @@ struct mac_ops {
*/
int (*get_nr_lmacs)(void *cgx);
u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ u32 (*lmac_fifo_len)(void *cgx, int lmac_id);
int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
bool enable);
/* Register Stats related functions */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 550cb11197bf..d7762577e285 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -33,7 +33,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 6000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -169,9 +169,10 @@ M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \
+ msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
- msg_rsp) \
+ cgx_mac_addr_update_rsp) \
M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@@ -241,6 +242,9 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
npc_mcam_get_stats_req, \
npc_mcam_get_stats_rsp) \
+M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \
+ npc_get_secret_key_req, \
+ npc_get_secret_key_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -428,6 +432,7 @@ struct get_hw_cap_rsp {
struct mbox_msghdr hdr;
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
+ u8 npc_hash_extract; /* Is hash extract supported */
};
/* CGX mbox message formats */
@@ -451,6 +456,7 @@ struct cgx_fec_stats_rsp {
struct cgx_mac_addr_set_or_get {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
+ u32 index;
};
/* Structure for requesting the operation to
@@ -466,7 +472,7 @@ struct cgx_mac_addr_add_req {
*/
struct cgx_mac_addr_add_rsp {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for requesting the operation to
@@ -474,7 +480,7 @@ struct cgx_mac_addr_add_rsp {
*/
struct cgx_mac_addr_del_req {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for response against the operation to
@@ -482,7 +488,7 @@ struct cgx_mac_addr_del_req {
*/
struct cgx_max_dmac_entries_get_rsp {
struct mbox_msghdr hdr;
- u8 max_dmac_filters;
+ u32 max_dmac_filters;
};
struct cgx_link_user_info {
@@ -583,10 +589,20 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_reset_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
struct cgx_mac_addr_update_req {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
- u8 index;
+ u32 index;
+};
+
+struct cgx_mac_addr_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
@@ -1440,6 +1456,16 @@ struct npc_mcam_get_stats_rsp {
u8 stat_ena; /* enabled */
};
+struct npc_get_secret_key_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+};
+
+struct npc_get_secret_key_rsp {
+ struct mbox_msghdr hdr;
+ u64 secret_key[3];
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
@@ -1622,6 +1648,11 @@ enum cgx_af_status {
LMAC_AF_ERR_PERM_DENIED = -1103,
LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
};
#endif /* MBOX_H */
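Editor's note: the M() entries changed or added above (CGX_MAC_ADDR_RESET, CGX_MAC_ADDR_UPDATE, NPC_GET_SECRET_KEY) feed an X-macro table that is expanded more than once elsewhere in mbox.h and rvu.h. The exact expansion macros are not part of this diff, so the following is only a sketch of the assumed pattern for the new secret-key message; the handler naming does match rvu_mbox_handler_cgx_mac_addr_reset() seen later in this series.

/* Hedged sketch of the assumed X-macro expansion for one table entry. */
#define MBOX_MESSAGES_EXAMPLE \
M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \
  npc_get_secret_key_req, npc_get_secret_key_rsp)

/* Expansion 1: message IDs */
#define M(_name, _id, _fn_name, _req_type, _rsp_type) MBOX_MSG_ ## _name = _id,
enum { MBOX_MESSAGES_EXAMPLE };
#undef M

/* Expansion 2: handler prototype the AF side must implement */
int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
					struct npc_get_secret_key_req *req,
					struct npc_get_secret_key_rsp *rsp);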
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 9b6e587e78b4..f187293e3e08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -10,6 +10,14 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -200,6 +208,7 @@ enum key_fields {
NPC_ERRLEV,
NPC_ERRCODE,
NPC_LXMB,
+ NPC_EXACT_RESULT,
NPC_LA,
NPC_LB,
NPC_LC,
@@ -381,6 +390,22 @@ struct nix_rx_action {
};
/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_EXACT_NIBBLE_START 40
+#define NPC_EXACT_NIBBLE_END 43
+#define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40)
+
+/* NPC_EXACT_KEX_S nibble definitions for each field */
+#define NPC_EXACT_NIBBLE_HIT BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_OPC BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_WAY BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41)
+
+#define NPC_EXACT_RESULT_HIT BIT_ULL(0)
+#define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1)
+#define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3)
+#define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5)
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
/* NPC_PARSE_KEX_S nibble definitions for each field */
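Editor's note: the NPC_EXACT_RESULT_* masks above describe how the exact-match lookup result is packed into the nibbles enabled by NPC_EXACT_NIBBLE. A minimal decoding sketch using FIELD_GET; the helper function and debug print are illustrative only.

#include <linux/bitfield.h>

/* Hedged sketch: decode an exact-match result using the masks above
 * (bit 0 hit, bits 2:1 opcode, bits 4:3 way, bits 15:5 table index).
 */
static void npc_exact_result_decode(u64 result)
{
	bool hit = FIELD_GET(NPC_EXACT_RESULT_HIT, result);
	u8 opc   = FIELD_GET(NPC_EXACT_RESULT_OPC, result);
	u8 way   = FIELD_GET(NPC_EXACT_RESULT_WAY, result);
	u16 idx  = FIELD_GET(NPC_EXACT_RESULT_IDX, result);

	pr_debug("exact match: hit=%d opc=%u way=%u idx=%u\n", hit, opc, way, idx);
}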
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 4180376fa676..a820bad3abb2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -155,7 +155,7 @@
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
- NPC_PARSE_NIBBLE_ERRCODE | \
+ NPC_PARSE_NIBBLE_L2L3_BCAST | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -15123,7 +15123,8 @@ static struct npc_mcam_kex npc_mkex_default = {
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
/* nibble: LA..LE (ltype only) + Error code + Channel */
- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX |
+ (u64)NPC_EXACT_NIBBLE_HIT,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 47e83d7a5804..ef59de43b11e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -22,6 +22,7 @@ static struct mac_ops rpm_mac_ops = {
.tx_stats_cnt = 34,
.get_nr_lmacs = rpm_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
@@ -276,6 +277,14 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable all PFC classes */
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ /* Enable channel mask for all LMACS */
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -342,6 +351,35 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
return err;
}
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 hi_perf_lmac;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = rpm->mac_ops->fifo_len;
+ num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC marked as hi_perf gets half of the FIFO and rest 1/4th */
+ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS);
+ hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL;
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -387,15 +425,14 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
{
rpm_t *rpm = rpmd;
- u64 cfg;
+ u64 cfg, class_en;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
- /* reset PFC class quanta and threshold */
- rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
-
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
@@ -410,9 +447,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (tx_pause) {
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
} else {
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
}
if (!rx_pause && !tx_pause)
@@ -422,9 +461,7 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
- cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
- rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
return 0;
}
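Editor's note: the PFC rework above reads back RPMX_CMRX_PRT_CBFC_CTL and ORs the currently enabled classes into the new request, so successive configuration calls accumulate instead of overwriting each other. A small worked example with hypothetical class values:

/* Worked example: two successive rpm_lmac_pfc_config() calls with tx_pause
 * set, first with pfc_en = 0x03 (classes 0-1), then with pfc_en = 0x0C
 * (classes 2-3):
 *
 *   call 1: class_en reads 0x00, pfc_en |= 0x00 -> writes 0x03
 *   call 2: class_en reads 0x03, pfc_en |= 0x03 -> writes 0x0F
 *
 * Before this change the second call would have left only 0x0C enabled;
 * disabling tx_pause now clears the whole class mask explicitly.
 */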
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 9ab8d49dd180..c2bd6e54ea51 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -48,10 +48,10 @@
#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
@@ -70,11 +70,12 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 54e1b27a7dfe..6809b8b4c556 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,7 @@
#include "ptp.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -68,6 +69,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
hw->cap.nix_shaper_toggle_wait = false;
+ hw->cap.npc_hash_extract = false;
+ hw->cap.npc_exact_match_enabled = false;
hw->rvu = rvu;
if (is_rvu_pre_96xx_C0(rvu)) {
@@ -85,6 +88,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
+
+ if (is_rvu_npc_hash_extract_en(rvu))
+ hw->cap.npc_hash_extract = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -1122,6 +1128,12 @@ cpt:
goto cgx_err;
}
+ err = rvu_npc_exact_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "failed to initialize exact match table\n");
+ return err;
+ }
+
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
@@ -1991,6 +2003,7 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
rsp->nix_shaping = hw->cap.nix_shaping;
+ rsp->npc_hash_extract = hw->cap.npc_hash_extract;
return 0;
}
@@ -2548,6 +2561,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_reset(rvu, pcifunc);
+
mutex_lock(&rvu->flr_lock);
/* Reset order should reflect inter-block dependencies:
* 1. Reset any packet/work sources (NIX, CPT, TIM)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 513b43ecd5be..d15bc443335d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -338,6 +338,8 @@ struct hw_cap {
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
+ bool npc_hash_extract; /* Hash extract enabled ? */
+ bool npc_exact_match_enabled; /* Exact match supported ? */
};
struct rvu_hwinfo {
@@ -369,6 +371,7 @@ struct rvu_hwinfo {
struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct npc_exact_table *table;
};
struct mbox_wq_info {
@@ -419,6 +422,7 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex_hash *mkex_hash;
bool custom;
size_t pkinds;
size_t kpus;
@@ -575,6 +579,17 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
+{
+ u64 npc_const3;
+
+ npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return false;
+
+ return true;
+}
+
static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
u8 lmacid, u8 chan)
{
@@ -754,7 +769,6 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
-int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
@@ -773,14 +787,17 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
+
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
@@ -810,11 +827,16 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
-
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
+int rvu_npc_init(struct rvu *rvu);
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask);
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 9ffe99830e34..addc69f4b65c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -474,6 +475,11 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
if (!is_cgx_config_permitted(rvu, pcifunc))
return;
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rvu_npc_exact_reset(rvu, pcifunc);
+ return;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
@@ -584,6 +590,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -602,6 +611,9 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
if (rc >= 0) {
@@ -622,6 +634,9 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}
@@ -643,6 +658,11 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
return 0;
}
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+ return 0;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
return 0;
@@ -680,6 +700,10 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, true);
@@ -695,6 +719,10 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, false);
@@ -833,6 +861,22 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu)
return fifo_len;
}
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
+ struct mac_ops *mac_ops;
+ void *cgxd;
+
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ return 0;
+
+ mac_ops = get_mac_ops(cgxd);
+ if (!mac_ops->lmac_fifo_len)
+ return 0;
+
+ return mac_ops->lmac_fifo_len(cgxd, lmac);
+}
+
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
@@ -1059,7 +1103,7 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
- return -ENXIO;
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
if (!is_pf_cgxmapped(rvu, pf))
return -EPERM;
@@ -1088,7 +1132,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -1098,12 +1142,16 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
return cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
- struct msg_rsp *rsp)
+ struct cgx_mac_addr_update_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
@@ -1111,6 +1159,9 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index a9da85e418a4..38bbae5d9ae0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2
+#define CPT_CTX_ILEN 2ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -480,7 +480,7 @@ static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
*/
if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
- val |= rvu->hw->cpt_chan_base;
+ val |= (u64)rvu->hw->cpt_chan_base;
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
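Editor's note: the 2 -> 2ULL and (u64) cast changes above guard against integer promotion when these constants end up in expressions that build 64-bit register values. Whether CPT_CTX_ILEN is actually shifted far enough to hit the hazard is not visible in this hunk; the generic C illustration below only shows the class of bug the ULL suffix rules out.

/* Generic illustration of the promotion hazard, not driver code. */
#define CTX_ILEN_INT	2	/* int: any shift is evaluated in 32 bits  */
#define CTX_ILEN_U64	2ULL	/* u64: the shift is evaluated in 64 bits  */

u64 lost = CTX_ILEN_INT << 31;	/* 2 << 31 overflows a 32-bit int
				 * (undefined behaviour); the set bit is
				 * typically gone before the result is
				 * widened to u64 */
u64 kept = CTX_ILEN_U64 << 31;	/* 0x100000000 as intended */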
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2ad73b180276..f42a09f04b25 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -18,6 +18,7 @@
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
+#include "rvu_npc_hash.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -2600,6 +2601,170 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
+ struct npc_exact_table_entry *cam_entry;
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i, j;
+
+ u8 bitmap = 0;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Check if there is at least one entry in mem table */
+ if (!table->mem_tbl_entry_cnt)
+ goto dump_cam_table;
+
+ /* Print table headers */
+ seq_puts(s, "\n\tExact Match MEM Table\n");
+ seq_puts(s, "Index\t");
+
+ for (i = 0; i < table->mem_table.ways; i++) {
+ mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
+ struct npc_exact_table_entry, list);
+
+ seq_printf(s, "Way-%d\t\t\t\t\t", i);
+ }
+
+ seq_puts(s, "\n");
+ for (i = 0; i < table->mem_table.ways; i++)
+ seq_puts(s, "\tChan MAC \t");
+
+ seq_puts(s, "\n\n");
+
+ /* Print mem table entries */
+ for (i = 0; i < table->mem_table.depth; i++) {
+ bitmap = 0;
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!mem_entry[j])
+ continue;
+
+ if (mem_entry[j]->index != i)
+ continue;
+
+ bitmap |= BIT(j);
+ }
+
+ /* No valid entries */
+ if (!bitmap)
+ continue;
+
+ seq_printf(s, "%d\t", i);
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!(bitmap & BIT(j))) {
+ seq_puts(s, "nil\t\t\t\t\t");
+ continue;
+ }
+
+ seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
+ mem_entry[j]->mac);
+ mem_entry[j] = list_next_entry(mem_entry[j], list);
+ }
+ seq_puts(s, "\n");
+ }
+
+dump_cam_table:
+
+ if (!table->cam_tbl_entry_cnt)
+ goto done;
+
+ seq_puts(s, "\n\tExact Match CAM Table\n");
+ seq_puts(s, "index\tchan\tMAC\n");
+
+ /* Traverse cam table entries */
+ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
+ seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
+ cam_entry->mac);
+ }
+
+done:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
+
+static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i;
+
+ table = rvu->hw->table;
+
+ seq_puts(s, "\n\tExact Table Info\n");
+ seq_printf(s, "Exact Match Feature : %s\n",
+ rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return 0;
+
+ seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
+
+ seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
+
+ seq_puts(s, "\n\tMEM Table Info\n");
+ seq_printf(s, "Ways : %d\n", table->mem_table.ways);
+ seq_printf(s, "Depth : %d\n", table->mem_table.depth);
+ seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
+ seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
+ seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
+
+ seq_puts(s, "\n\tCAM Table Info\n");
+ seq_printf(s, "Depth : %d\n", table->cam_table.depth);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
+
+static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ struct npc_key_field *field;
+ u16 chan, pcifunc;
+ int blkaddr, i;
+ u64 cfg, cam1;
+ char *str;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ table = rvu->hw->table;
+
+ field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
+
+ seq_puts(s, "\n\t Exact Hit on drop status\n");
+ seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
+
+ for (i = 0; i < table->num_drop_rules; i++) {
+ pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
+
+ /* channel will always be in keyword 0 */
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
+ chan = field->kw_mask[0] & cam1;
+
+ str = (cfg & 1) ? "enabled" : "disabled";
+
+ seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(table->counter_idx[i])),
+ chan, str);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
+
static void rvu_dbg_npc_init(struct rvu *rvu)
{
rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
@@ -2608,8 +2773,22 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
&rvu_dbg_npc_mcam_info_fops);
debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_mcam_rules_fops);
+
debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_rx_miss_act_fops);
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return;
+
+ debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_entries_fops);
+
+ debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_info_fops);
+
+ debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_drop_cnt_fops);
+
}
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index d0ab8f233a02..88dee589cb21 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -10,6 +10,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "rvu_struct.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "octeontx2-af"
@@ -1436,14 +1437,75 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
};
+static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ bool enabled;
+
+ enabled = rvu_npc_exact_has_match_table(rvu);
+
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
+ enabled ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_npc_exact_disable_feature(rvu);
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 enable;
+
+ if (kstrtoull(val.vstr, 10, &enable)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 value is supported");
+ return -EINVAL;
+ }
+
+ if (enable != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only disabling exact match feature is supported");
+ return -EINVAL;
+ }
+
+ if (rvu_npc_exact_can_disable_feature(rvu))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't disable exact match feature; Please try before any configuration");
+ return -EFAULT;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
};
/* Devlink switch mode */
@@ -1501,6 +1563,7 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
+ size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1522,8 +1585,12 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- err = devlink_params_register(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+ /* Register exact match devlink only for CN10K-B */
+ size = ARRAY_SIZE(rvu_af_dl_params);
+ if (!rvu_npc_exact_has_match_table(rvu))
+ size -= 1;
+
+ err = devlink_params_register(dl, rvu_af_dl_params, size);
if (err) {
dev_err(rvu->dev,
"devlink params register failed with error %d", err);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0fa625e2528e..0879a48411f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -14,6 +14,7 @@
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
+#include "rvu_npc_hash.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
@@ -3792,9 +3793,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_enable(rvu, pcifunc);
} else {
if (!nix_rx_multicast)
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_disable(rvu, pcifunc);
}
return 0;
@@ -4003,9 +4010,13 @@ linkcfg:
return 0;
/* Update transmit credits for CGX links */
- lmac_fifo_len =
- rvu_cgx_get_fifolen(rvu) /
- cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, lmac);
+ return 0;
+ }
return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
(lmac_fifo_len - req->maxlen) / 16);
}
@@ -4057,7 +4068,10 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
u16 lbk_max_frs, lmac_max_frs;
+ unsigned long lmac_bmap;
u64 tx_credits, cfg;
+ u64 lmac_fifo_len;
+ int iter;
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
@@ -4091,12 +4105,23 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Skip when cgx is not available or lmac cnt is zero */
if (lmac_cnt <= 0)
continue;
- tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
- lmac_max_frs) / 16;
- /* Enable credits and set credit pkt count to max allowed */
- cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
- for (link = slink; link < (slink + lmac_cnt); link++) {
+
+ /* Get LMAC id's from bitmap */
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, iter);
+ continue;
+ }
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+
+ link = iter + slink;
nix_hw->tx_credits[link] = tx_credits;
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
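Editor's note: the credit computation now uses the per-LMAC FIFO size from rvu_cgx_get_lmac_fifolen() rather than an even split of the whole CGX FIFO. A worked example with hypothetical numbers:

/* Worked example (hypothetical values): lmac_fifo_len = 16384 bytes and
 * lmac_max_frs = 1518 bytes:
 *
 *   tx_credits = (16384 - 1518) / 16 = 929   (credits are 16-byte units)
 *   cfg        = (929 << 12) | (0x1FF << 2) | BIT_ULL(1)
 *
 * i.e. the credit count starts at bit 12, the credit packet count is set
 * to the maximum 0x1FF and bit 1 enables crediting, matching the
 * expression in nix_link_config() above.
 */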
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 3a31fb8cc155..583ead4dd246 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -15,6 +15,7 @@
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"
+#include "rvu_npc_hash.h"
#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
@@ -1105,6 +1106,34 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
NIXLF_PROMISC_ENTRY, false);
}
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+
+ mutex_lock(&mcam->lock);
+
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->intf != intf)
+ continue;
+
+ if (rule->entry != entry)
+ continue;
+
+ rule->enable = enable;
+ mutex_unlock(&mcam->lock);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ entry, enable);
+
+ return true;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return false;
+}
+
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
@@ -1181,14 +1210,6 @@ void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-
-#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-
static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
struct npc_mcam_kex *mkex, u8 intf)
{
@@ -1262,6 +1283,9 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
}
+
+ /* Programme mkex hash profile */
+ npc_program_mkex_hash(rvu, blkaddr);
}
static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
@@ -1463,6 +1487,7 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
profile->lt_def = &npc_lt_defaults;
profile->mkex = &npc_mkex_default;
+ profile->mkex_hash = &npc_mkex_hash_default;
return 0;
}
@@ -1819,7 +1844,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
-
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -2047,6 +2071,7 @@ int rvu_npc_init(struct rvu *rvu)
rvu_npc_setup_interfaces(rvu, blkaddr);
+ npc_config_secret_key(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
@@ -2534,7 +2559,7 @@ alloc:
/* Copy MCAM entry indices into mbox response entry_list.
* Requester always expects indices in ascending order, so
- * so reverse the list if reverse bitmap is used for allocation.
+ * reverse the list if reverse bitmap is used for allocation.
*/
if (!req->contig && rsp->count) {
index = 0;
@@ -2562,6 +2587,14 @@ alloc:
return 0;
}
+/* Mark the bitmap to reserve the given mcam slot */
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ npc_mcam_set_bit(mcam, entry_idx);
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 19c53e591d0d..a400aa22da79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -10,6 +10,8 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
#define NPC_BYTESM GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
@@ -227,6 +229,25 @@ static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
return true;
}
+static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 40 ... 43:
+ type = NPC_EXACT_RESULT;
+ break;
+
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
u8 key_nibble, u8 intf)
{
@@ -276,6 +297,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
default:
return;
}
+
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -509,8 +531,8 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u8 lid, lt, ld, bitnr;
+ u64 cfg, masked_cfg;
u8 key_nibble = 0;
- u64 cfg;
/* Scan and note how parse result is going to be in key.
* A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
@@ -518,12 +540,24 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
* will be concatenated in key.
*/
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
- cfg &= NPC_PARSE_NIBBLE;
- for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ masked_cfg = cfg & NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
key_nibble++;
}
+ /* Ignore exact match bits for mcam entries except the first rule
+ * which is drop on hit. This first rule is configured explicitly by
+ * exact match code.
+ */
+ masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ bitnr = NPC_EXACT_NIBBLE_START;
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
+ NPC_EXACT_NIBBLE_START) {
+ npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
/* Scan and note how layer data is going to be in key */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
@@ -624,9 +658,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
* If any bits in mask are 0 then corresponding bits in value are
* dont care.
*/
-static void npc_update_entry(struct rvu *rvu, enum key_fields type,
- struct mcam_entry *entry, u64 val_lo,
- u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry dummy = { {0} };
@@ -705,8 +739,6 @@ static void npc_update_entry(struct rvu *rvu, enum key_fields type,
}
}
-#define IPV6_WORDS 4
-
static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
@@ -779,7 +811,8 @@ static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
- struct rvu_npc_mcam_rule *output, u8 intf)
+ struct rvu_npc_mcam_rule *output, u8 intf,
+ int blkaddr)
{
u64 dmac_mask = ether_addr_to_u64(mask->dmac);
u64 smac_mask = ether_addr_to_u64(mask->smac);
@@ -828,6 +861,7 @@ do { \
} while (0)
NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+
NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0);
@@ -854,10 +888,12 @@ do { \
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
npc_update_vlan_features(rvu, entry, features, intf);
+
+ npc_update_field_hash(rvu, intf, entry, blkaddr, features,
+ pkt, mask, opkt, omask);
}
-static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
- u16 entry)
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
{
struct rvu_npc_mcam_rule *iter;
@@ -1023,8 +1059,9 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
u16 owner = req->hdr.pcifunc;
struct msg_rsp write_rsp;
struct mcam_entry *entry;
- int entry_index, err;
bool new = false;
+ u16 entry_index;
+ int err;
installed_features = req->features;
features = req->features;
@@ -1032,7 +1069,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
entry_index = req->entry;
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
- req->intf);
+ req->intf, blkaddr);
if (is_npc_intf_rx(req->intf))
npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
@@ -1057,7 +1094,8 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, missing_features,
&def_ucast_rule->packet,
&def_ucast_rule->mask,
- &dummy, req->intf);
+ &dummy, req->intf,
+ blkaddr);
installed_features = req->features | missing_features;
}
@@ -1424,3 +1462,98 @@ void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
}
mutex_unlock(&mcam->lock);
}
+
+/* Install a single drop on non hit rule starting from the 0th index. This is
+ * an extension to the RPM MAC filter to support more rules.
+ */
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ struct npc_mcam_write_entry_req req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp rsp;
+ bool enabled;
+ int blkaddr;
+ int err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Bail out if no exact match support */
+ if (!rvu_npc_exact_has_match_table(rvu)) {
+ dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If 0th entry is already used, return err */
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
+ if (enabled) {
+ dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
+ __func__, mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Add this entry to mcam rules list */
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ /* Disable rule by default. Enable rule when first dmac filter is
+ * installed
+ */
+ rule->enable = false;
+ rule->chan = chan_val;
+ rule->chan_mask = chan_mask;
+ rule->entry = mcam_idx;
+ rvu_mcam_add_rule(mcam, rule);
+
+ /* Reserve slot 0 */
+ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);
+
+ /* Allocate counter for this single drop on non hit rule */
+ cntr_req.hdr.pcifunc = 0; /* AF request */
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n",
+ __func__, err);
+ return -EFAULT;
+ }
+ *counter_idx = cntr_rsp.cntr;
+
+ /* Fill in fields for this mcam entry */
+ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
+ exact_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
+ chan_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
+ bcast_mcast_mask, 0, NIX_INTF_RX);
+
+ req.intf = NIX_INTF_RX;
+ req.set_cntr = true;
+ req.cntr = cntr_rsp.cntr;
+ req.entry = mcam_idx;
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
+ __func__, mcam_idx);
+ return err;
+ }
+
+ dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
+ __func__, mcam_idx, req.cntr);
+
+ /* disable entry at Bank 0, index 0 */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
new file mode 100644
index 000000000000..bdd65ce56a32
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_FS_H
+#define __RVU_NPC_FS_H
+
+#define IPV6_WORDS 4
+
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf);
+
+#endif /* __RVU_NPC_FS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
new file mode 100644
index 000000000000..594029007f85
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
+ size_t width_bits)
+{
+ const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
+ const size_t msb = start_bit + width_bits - 1;
+ const size_t lword = start_bit >> 6;
+ const size_t uword = msb >> 6;
+ size_t lbits;
+ u64 hi, lo;
+
+ if (lword == uword)
+ return (input[lword] >> (start_bit & 63)) & mask;
+
+ lbits = 64 - (start_bit & 63);
+ hi = input[uword];
+ lo = (input[lword] >> (start_bit & 63));
+ return ((hi << lbits) | lo) & mask;
+}
+
+static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
+{
+ u64 prev_orig_word = 0;
+ u64 cur_orig_word = 0;
+ size_t extra = key_bit_len % 64;
+ size_t max_idx = key_bit_len / 64;
+ size_t i;
+
+ if (extra)
+ max_idx++;
+
+ for (i = 0; i < max_idx; i++) {
+ cur_orig_word = key[i];
+ key[i] = key[i] << 1;
+ key[i] |= ((prev_orig_word >> 63) & 0x1);
+ prev_orig_word = cur_orig_word;
+ }
+}
+
+static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ size_t key_bit_len)
+{
+ u32 hash_out = 0;
+ u64 temp_data = 0;
+ int i;
+
+ for (i = data_bit_len - 1; i >= 0; i--) {
+ temp_data = (data[i / 64]);
+ temp_data = temp_data >> (i % 64);
+ temp_data &= 0x1;
+ if (temp_data)
+ hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
+
+ rvu_npc_lshift_key(key, key_bit_len);
+ }
+
+ return hash_out;
+}
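
For reference, the bit-serial Toeplitz construction implemented by the two helpers above boils down to a few lines: for every set data bit, scanned MSB first, the current top 32 bits of the key are XORed into the result and the whole key is shifted left by one bit. A stand-alone user-space sketch (the single 128-bit key integer, the 96-bit key width and the sample values are illustrative assumptions, not driver values):

    #include <stdint.h>
    #include <stdio.h>

    /* Condensed Toeplitz walk: 'key' lives in one 128-bit integer instead of
     * the driver's u64 array, so no wide-extract/lshift helpers are needed.
     */
    static uint32_t toeplitz32(uint64_t data, unsigned __int128 key,
                               unsigned int data_bits, unsigned int key_bits)
    {
            uint32_t hash = 0;
            int i;

            for (i = data_bits - 1; i >= 0; i--) {
                    if ((data >> i) & 1)
                            /* XOR in the current top 32 bits of the key */
                            hash ^= (uint32_t)(key >> (key_bits - 32));
                    key <<= 1; /* slide the key left one bit per data bit */
            }
            return hash;
    }

    int main(void)
    {
            unsigned __int128 key = ((unsigned __int128)0x12345678ULL << 64) |
                                    0x9abcdef012345678ULL;

            printf("0x%08x\n", toeplitz32(0x001122334455ULL, key, 64, 96));
            return 0;
    }
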
+
+u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+ u64 *secret_key, u8 intf, u8 hash_idx)
+{
+ u64 hash_key[3];
+ u64 data_padded[2];
+ u32 field_hash;
+
+ hash_key[0] = secret_key[1] << 31;
+ hash_key[0] |= secret_key[2];
+ hash_key[1] = secret_key[1] >> 33;
+ hash_key[1] |= secret_key[0] << 31;
+ hash_key[2] = secret_key[0] >> 33;
+
+ data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
+ field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+
+ field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
+ field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
+ return field_hash;
+}
+
+static u64 npc_update_use_hash(int lt, int ld)
+{
+ u64 cfg = 0;
+
+ switch (lt) {
+ case NPC_LT_LC_IP6:
+ /* Update use_hash(bit-20) and bytesm1 (bit-16:19)
+ * in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ ld ? 0x8 : 0x18,
+ 0x1, 0x0, 0x10);
+ break;
+ }
+
+ return cfg;
+}
+
+static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg = npc_update_use_hash(lt, ld);
+
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ }
+ }
+ }
+ }
+}
+
+static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg = npc_update_use_hash(lt, ld);
+
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ hash_cnt++;
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+ }
+ }
+ }
+}
+
+void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract) {
+ dev_info(rvu->dev, "HW does not support secret key configuration\n");
+ return;
+ }
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+ RVU_NPC_HASH_SECRET_KEY0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
+ RVU_NPC_HASH_SECRET_KEY1);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
+ RVU_NPC_HASH_SECRET_KEY2);
+ }
+}
+
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract) {
+ dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
+ return;
+ }
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+ }
+}
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_get_secret_key_req req;
+ struct npc_get_secret_key_rsp rsp;
+ u64 ldata[2], cfg;
+ u32 field_hash;
+ u8 hash_idx;
+
+ if (!rvu->hw->cap.npc_hash_extract) {
+ dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
+ return;
+ }
+
+ req.intf = intf;
+ rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
+
+ for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+ if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
+ u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
+ u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
+ u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
+ switch (ltype & ltype_mask) {
+ /* If hash extraction is enabled for IPv6, the 128-bit
+ * source and destination addresses are each hashed down
+ * to a 32-bit value.
+ */
+ case NPC_LT_LC_IP6:
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ u32 src_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+ ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ mkex_hash,
+ rsp.secret_key,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+ field_hash, 0, 32, 0, intf);
+ memcpy(&opkt->ip6src, &pkt->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&omask->ip6src, &mask->ip6src,
+ sizeof(mask->ip6src));
+ break;
+ }
+
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ u32 dst_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+ ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ mkex_hash,
+ rsp.secret_key,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+ field_hash, 0, 32, 0, intf);
+ memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&omask->ip6dst, &mask->ip6dst,
+ sizeof(mask->ip6dst));
+ }
+ break;
+ }
+ }
+ }
+ }
+}
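
As a concrete illustration of the ldata packing above (assuming ip6src/ip6dst hold the address in network byte order, which is what be32_to_cpu_array() implies, and using 2001:db8::1 purely as an example value):

    /* src_ip[0..3] = { 0x20010db8, 0x00000000, 0x00000000, 0x00000001 }
     * ldata[0]     = 0x20010db800000000   (upper 64 bits of the address)
     * ldata[1]     = 0x0000000000000001   (lower 64 bits of the address)
     * ldata[] is then masked by hash_mask[] and run through the Toeplitz
     * hash in npc_field_hash_calc().
     */
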
+
+int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+ struct npc_get_secret_key_req *req,
+ struct npc_get_secret_key_rsp *rsp)
+{
+ u64 *secret_key = rsp->secret_key;
+ u8 intf = req->intf;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
+ secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
+ * @mac_addr: MAC address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ return mac;
+}
+
+/**
+ * rvu_exact_prepare_mdata - Make mdata for mcam entry
+ * @mac: MAC address
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @mask: LDATA mask.
+ * Return: Meta data
+ */
+static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac);
+
+ /* Note that the mask is 48 bits wide and excludes chan and ctype.
+ * Increase the mask width if those need to be included as well.
+ */
+ ldata |= ((u64)chan << 48);
+ ldata |= ((u64)ctype << 60);
+ ldata &= mask;
+ ldata = ldata << 2;
+
+ return ldata;
+}
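
A worked example of the resulting search mdata, using hypothetical values (mac 00:11:22:33:44:55, chan 0x800, ctype 0) and the RX mask GENMASK_ULL(59, 48) | GENMASK_ULL(47, 0) programmed by rvu_exact_config_table_mask() further below:

    /* rvu_npc_exact_mac2u64()  -> 0x0000001122334455
     * | chan << 48             -> 0x0800001122334455
     * | ctype << 60            -> 0x0800001122334455
     * & mask                   -> 0x0800001122334455
     * << 2                     -> 0x2000004488cd1154
     */
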
+
+/**
+ * rvu_exact_calculate_hash - calculate hash index to mem table.
+ * @rvu: resource virtualization unit.
+ * @chan: Channel number
+ * @ctype: Channel type.
+ * @mac: MAC address
+ * @mask: HASH mask.
+ * @table_depth: Depth of table.
+ * Return: Hash value
+ */
+static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
+ u64 mask, u32 table_depth)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ u64 hash_key[2];
+ u64 key_in[2];
+ u64 ldata;
+ u32 hash;
+
+ key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
+ key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
+
+ hash_key[0] = key_in[0] << 31;
+ hash_key[0] |= key_in[1];
+ hash_key[1] = key_in[0] >> 33;
+
+ ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
+
+ dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
+ ldata, hash_key[1], hash_key[0]);
+ hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
+
+ hash &= table->mem_table.hash_mask;
+ hash += table->mem_table.hash_offset;
+ dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
+
+ return hash;
+}
+
+/**
+ * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
+ * @rvu: resource virtualization unit.
+ * @way: Indicate way to table.
+ * @index: Hash index to 4 way table.
+ * @hash: Hash value.
+ *
+ * Searches 4 way table using hash index. Returns 0 on success.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
+ u32 *index, unsigned int hash)
+{
+ struct npc_exact_table *table;
+ int depth, i;
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ /* Check all the 4 ways for a free slot. */
+ mutex_lock(&table->lock);
+ for (i = 0; i < table->mem_table.ways; i++) {
+ if (test_bit(hash + i * depth, table->mem_table.bmap))
+ continue;
+
+ set_bit(hash + i * depth, table->mem_table.bmap);
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
+ __func__, i, hash);
+
+ *way = i;
+ *index = hash;
+ return 0;
+ }
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
+ bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
+ return -ENOSPC;
+}
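
The bitmap layout assumed by the hash + i * depth arithmetic above is way-major: all ways share one bitmap and way i occupies bits [i * depth, (i + 1) * depth). A minimal sketch of that mapping (hypothetical helper name, not part of the driver):

    static inline unsigned int exact_mem_bmap_bit(unsigned int hash,
                                                  unsigned int way,
                                                  unsigned int depth)
    {
            /* row 'hash' of way 'way' in the shared mem-table bitmap */
            return hash + way * depth;
    }
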
+
+/**
+ * rvu_npc_exact_free_id - Free seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier to be freed.
+ */
+static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ mutex_lock(&table->lock);
+ clear_bit(seq_id, table->id_bmap);
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
+}
+
+/**
+ * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ * Return: True or false.
+ */
+static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
+ if (idx == table->tot_ids) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
+ __func__, bitmap_weight(table->id_bmap, table->tot_ids));
+
+ return false;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->id_bmap);
+ mutex_unlock(&table->lock);
+
+ *seq_id = idx;
+ dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
+ * @rvu: resource virtualization unit.
+ * @index: Index to exact CAM table.
+ * Return: 0 upon success; else error number.
+ */
+static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
+ if (idx == table->cam_table.depth) {
+ mutex_unlock(&table->lock);
+ dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
+ bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
+ return -ENOSPC;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->cam_table.bmap);
+ mutex_unlock(&table->lock);
+
+ *index = idx;
+ dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
+ __func__, idx);
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_table_entry - Data for exact match table entry.
+ * @rvu: Resource virtualization unit.
+ * @enable: Enable/Disable entry
+ * @ctype: Software defined channel type. Currently set as 0.
+ * @chan: Channel number.
+ * @mac_addr: Destination mac address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
+ u8 ctype, u16 chan, u8 *mac_addr)
+
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+
+ /* Enable or disable */
+ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
+
+ /* Set Ctype */
+ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
+
+ /* Set chan */
+ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
+
+ /* MAC address */
+ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
+
+ return mdata;
+}
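
The FIELD_PREP() calls above produce the layout summarised below; the concrete values are only a worked example, not driver defaults:

    /* enable = 1, ctype = 0, chan = 0x800, mac = 00:11:22:33:44:55
     *   bit 63     = 1               (entry valid)
     *   bits 61:60 = 0               (ctype)
     *   bits 59:48 = 0x800           (channel)
     *   bits 47:0  = 0x001122334455  (dmac)
     *   mdata      = 0x8800001122334455
     */
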
+
+/**
+ * rvu_exact_config_secret_key - Configure secret key.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_secret_key(struct rvu *rvu)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY1);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY2);
+}
+
+/**
+ * rvu_exact_config_search_key - Configure search key
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_search_key(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg_val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* HDR offset */
+ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
+
+ /* BYTESM1, number of bytes - 1 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
+
+ /* Enable LID and set LID to NPC_LID_LA */
+ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
+ reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);
+
+ /* Clear layer type based extraction */
+
+ /* Disable LT_EN */
+ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
+
+ /* Set LTYPE_MATCH to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
+
+ /* Set LTYPE_MASK to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
+}
+
+/**
+ * rvu_exact_config_result_ctrl - Set exact table hash control
+ * @rvu: Resource virtualization unit.
+ * @depth: Depth of Exact match table.
+ *
+ * Sets mask and offset for hash for mem table.
+ */
+static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
+{
+ int blkaddr;
+ u64 reg = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Set mask. Note that depth is a power of 2 */
+ rvu->hw->table->mem_table.hash_mask = (depth - 1);
+ reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
+
+ /* Set offset as 0 */
+ rvu->hw->table->mem_table.hash_offset = 0;
+ reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
+
+ /* Set reg for RX */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
+ /* Store hash mask and offset for s/w algorithm */
+}
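
Because the depth is a power of two, the mask written above simply keeps the low log2(depth) bits of the 32-bit Toeplitz hash. Assuming a depth of 2048 (an illustrative value; the real depth is read from NPC_AF_CONST3 in rvu_npc_exact_init()):

    /* hash_mask   = depth - 1 = 0x7ff
     * hash_offset = 0
     * for a Toeplitz hash of 0x9abcdef1:
     *   row = (0x9abcdef1 & 0x7ff) + 0 = 0x6f1
     */
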
+
+/**
+ * rvu_exact_config_table_mask - Set exact table mask.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_table_mask(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 mask = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Don't use Ctype */
+ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
+
+ /* Set chan */
+ mask |= GENMASK_ULL(59, 48);
+
+ /* Full ldata */
+ mask |= GENMASK_ULL(47, 0);
+
+ /* Store mask for s/w hash calculation */
+ rvu->hw->table->mem_table.mask = mask;
+
+ /* Set mask for RX.*/
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
+}
+
+/**
+ * rvu_npc_exact_get_max_entries - Get total number of entries in table.
+ * @rvu: resource virtualization unit.
+ * Return: Maximum table entries possible.
+ */
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ return table->tot_ids;
+}
+
+/**
+ * rvu_npc_exact_has_match_table - Checks support for exact match.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match table is supported/enabled.
+ */
+bool rvu_npc_exact_has_match_table(struct rvu *rvu)
+{
+ return rvu->hw->cap.npc_exact_match_enabled;
+}
+
+/**
+ * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ *
+ * Caller should acquire the lock.
+ * Return: Pointer to table entry.
+ */
+static struct npc_exact_table_entry *
+__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *entry = NULL;
+ struct list_head *lhead;
+
+ lhead = &table->lhead_gbl;
+
+ /* traverse to find the matching entry */
+ list_for_each_entry(entry, lhead, glist) {
+ if (entry->seq_id != seq_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * rvu_npc_exact_add_to_list - Add entry to list
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE to select MEM/CAM table.
+ * @ways: MEM table ways.
+ * @index: Index in MEM/CAM table.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @mac_addr: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence identifier
+ * @cmd: True if function is called by ethtool cmd
+ * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
+ * @pcifunc: pci function
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
+ u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
+ u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
+{
+ struct npc_exact_table_entry *entry, *tmp, *iter;
+ struct npc_exact_table *table = rvu->hw->table;
+ struct list_head *lhead, *pprev;
+
+ WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
+
+ if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
+ dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
+ return -EFAULT;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rvu_npc_exact_free_id(rvu, *seq_id);
+ dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&table->lock);
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+ lhead = &table->lhead_cam_tbl_entry;
+ table->cam_tbl_entry_cnt++;
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+ lhead = &table->lhead_mem_tbl_entry[ways];
+ table->mem_tbl_entry_cnt++;
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ kfree(entry);
+ rvu_npc_exact_free_id(rvu, *seq_id);
+
+ dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
+ return -EINVAL;
+ }
+
+ /* Add to global list */
+ INIT_LIST_HEAD(&entry->glist);
+ list_add_tail(&entry->glist, &table->lhead_gbl);
+ INIT_LIST_HEAD(&entry->list);
+ entry->index = index;
+ entry->ways = ways;
+ entry->opc_type = opc_type;
+
+ entry->pcifunc = pcifunc;
+
+ ether_addr_copy(entry->mac, mac_addr);
+ entry->chan = chan;
+ entry->ctype = ctype;
+ entry->cgx_id = cgx_id;
+ entry->lmac_id = lmac_id;
+
+ entry->seq_id = *seq_id;
+
+ entry->mcam_idx = mcam_idx;
+ entry->cmd = cmd;
+
+ pprev = lhead;
+
+ /* Insert entry in ascending order of index */
+ list_for_each_entry_safe(iter, tmp, lhead, list) {
+ if (index < iter->index)
+ break;
+
+ pprev = &iter->list;
+ }
+
+ /* Add to each table list */
+ list_add(&entry->list, pprev);
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mem_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @ways: ways for MEM table.
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
+}
+
+/**
+ * rvu_npc_exact_cam_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
+}
+
+/**
+ * rvu_npc_exact_dealloc_table_entry - dealloc table entry
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE for selection of table(MEM or CAM)
+ * @ways: ways if opc_type is MEM table.
+ * @index: Index of MEM or CAM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
+ u8 ways, u32 index)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table *table;
+ u8 null_dmac[6] = { 0 };
+ int depth;
+
+ /* Prepare entry with all fields set to zero */
+ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ mutex_lock(&table->lock);
+
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index, table->cam_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
+ __func__, ways, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
+ clear_bit(index, table->cam_table.bmap);
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
+ clear_bit(index + ways * depth, table->mem_table.bmap);
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
+ __func__, index, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_alloc_table_entry - Allocate an entry
+ * @rvu: resource virtualization unit.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @index: Index of MEM table or CAM table.
+ * @ways: Ways. Only valid for MEM table.
+ * @opc_type: OPCODE to select table (MEM or CAM)
+ *
+ * Try allocating a slot from MEM table. If all 4 ways
+ * slot are full for a hash index, check availability in
+ * 32-entry CAM table for allocation.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
+ u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
+{
+ struct npc_exact_table *table;
+ unsigned int hash;
+ int err;
+
+ table = rvu->hw->table;
+
+ /* Check the 4-way mem table for a free slot */
+ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
+ table->mem_table.depth);
+ err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_MEM;
+ dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
+ __func__, *ways, *index);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
+
+ /* ways is 0 for the cam table */
+ *ways = 0;
+ err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_CAM;
+ dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
+ __func__, *index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: Drop rule index in NPC mcam.
+ * @chan_val: Channel value.
+ * @chan_mask: Channel Mask.
+ * @pcifunc: pcifunc of interface.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
+ u64 chan_val, u64 chan_mask, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
+ continue;
+
+ return false;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX)
+ return false;
+
+ table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
+ table->drop_rule_map[i].chan_val = (u16)chan_val;
+ table->drop_rule_map[i].chan_mask = (u16)chan_mask;
+ table->drop_rule_map[i].pcifunc = pcifunc;
+ table->drop_rule_map[i].valid = true;
+ return true;
+}
+
+/**
+ * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (SDK, LBK or CGX)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @val: Channel number.
+ * @mask: Channel mask.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
+ u8 cgx_id, u8 lmac_id,
+ u64 *val, u64 *mask)
+{
+ u16 chan_val, chan_mask;
+
+ /* No support for SDP and LBK */
+ if (intf_type != NIX_INTF_TYPE_CGX)
+ return false;
+
+ chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ chan_mask = 0xfff;
+
+ if (val)
+ *val = chan_val;
+
+ if (mask)
+ *mask = chan_mask;
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
+ * @rvu: resource virtualization unit.
+ * @drop_rule_idx: Drop rule index in NPC mcam.
+ *
+ * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
+ * by retrieving the pcifunc value from data base.
+ * Return: Drop rule index.
+ */
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
+ continue;
+
+ return table->drop_rule_map[i].pcifunc;
+ }
+
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, drop_rule_idx);
+ return -1;
+}
+
+/**
+ * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (CGX, SDP or LBK)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: Channel value.
+ * @mask: Channel mask.
+ * @pcifunc: pcifunc of interface corresponding to the drop rule.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
+ u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
+ u64 *mask, u16 *pcifunc)
+{
+ struct npc_exact_table *table;
+ u64 chan_val, chan_mask;
+ bool rc;
+ int i;
+
+ table = rvu->hw->table;
+
+ if (intf_type != NIX_INTF_TYPE_CGX) {
+ dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
+ return false;
+ }
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc)
+ return false;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (val)
+ *val = table->drop_rule_map[i].chan_val;
+ if (mask)
+ *mask = table->drop_rule_map[i].chan_mask;
+ if (pcifunc)
+ *pcifunc = table->drop_rule_map[i].pcifunc;
+
+ *drop_mcam_idx = i;
+ return true;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX) {
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, *drop_mcam_idx);
+ return false;
+ }
+
+ dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return false;
+}
+
+/**
+ * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: +1 or -1.
+ * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
+ *
+ * When the first exact match entry against a drop rule is added, or the last
+ * exact match entry against a drop rule is deleted, *enable_or_disable_cam is
+ * set to true so the caller can enable or disable the drop rule accordingly.
+ * Return: Number of rules
+ */
+static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
+ int val, bool *enable_or_disable_cam)
+{
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
+ bool promisc;
+
+ table = rvu->hw->table;
+ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+
+ *cnt += val;
+
+ if (!enable_or_disable_cam)
+ goto done;
+
+ *enable_or_disable_cam = false;
+
+ if (promisc)
+ goto done;
+
+ /* If all rules are deleted and not already in promisc mode; disable cam */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+ /* If rule got added and not already in promisc mode; enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+done:
+ return *cnt;
+}
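
Assuming the interface is not in promisc mode, the caller-visible transitions of this helper can be sketched as follows (illustrative table, not driver output):

    /* old_cnt  val  new_cnt  *enable_or_disable_cam
     *    0     +1      1     true   (first DMAC filter: enable drop rule)
     *    1     +1      2     false
     *    2     -1      1     false
     *    1     -1      0     true   (last DMAC filter: disable drop rule)
     */
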
+
+/**
+ * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Deletes entry from linked lists and free up slot in HW MEM or CAM
+ * table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table_entry *entry = NULL;
+ struct npc_exact_table *table;
+ bool disable_cam = false;
+ u32 drop_mcam_idx = -1;
+ int *cnt;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
+ if (!entry) {
+ dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
+ &table->mem_tbl_entry_cnt;
+
+ /* delete from lists */
+ list_del_init(&entry->list);
+ list_del_init(&entry->glist);
+
+ (*cnt)--;
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
+ entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
+ __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ if (entry->cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
+
+ /* No dmac filter rules; disable drop on hit rule */
+ if (disable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ mutex_unlock(&table->lock);
+
+ rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
+
+ rvu_npc_exact_free_id(rvu, seq_id);
+
+ dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
+ __func__, seq_id, entry->mac);
+ kfree(entry);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_add_table_entry - Adds a table entry
+ * @rvu: resource virtualization unit.
+ * @cgx_id: cgx identifier.
+ * @lmac_id: lmac identifier.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence number.
+ * @cmd: Whether it is invoked by ethtool cmd.
+ * @mcam_idx: NPC mcam index corresponding to MAC
+ * @pcifunc: PCI func.
+ *
+ * Creates a new exact match table entry in either CAM or
+ * MEM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
+ u16 chan, u8 ctype, u32 *seq_id, bool cmd,
+ u32 mcam_idx, u16 pcifunc)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ enum npc_exact_opc_type opc_type;
+ bool enable_cam = false;
+ u32 drop_mcam_idx;
+ u32 index;
+ u64 mdata;
+ bool rc;
+ int err;
+ u8 ways;
+
+ ctype = 0;
+
+ err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
+ if (err) {
+ dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
+ return err;
+ }
+
+ /* Write mdata to table */
+ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
+
+ if (opc_type == NPC_EXACT_OPC_CAM)
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
+ else
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
+
+ /* Insert entry to linked list */
+ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
+ mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
+ if (err) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
+ return err;
+ }
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ if (cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
+
+ /* First command rule; enable drop on hit rule */
+ if (enable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
+ dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, index, mac, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_update_table_entry - Update exact match table.
+ * @rvu: resource virtualization unit.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @old_mac: Existing MAC address entry.
+ * @new_mac: New MAC address entry.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Updates MAC address of an entry. If entry is in MEM table, new
+ * hash value may not match with old one.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
+ u8 *old_mac, u8 *new_mac, u32 *seq_id)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ u32 hash_index;
+ u64 mdata;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
+ if (!entry) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev,
+ "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
+ __func__, cgx_id, lmac_id, old_mac);
+ return -ENODATA;
+ }
+
+ /* If entry is in mem table and new hash index is different than old
+ * hash index, we cannot update the entry. Fail in these scenarios.
+ */
+ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
+ hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
+ new_mac, table->mem_table.mask,
+ table->mem_table.depth);
+ if (hash_index != entry->index) {
+ dev_dbg(rvu->dev,
+ "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
+ __func__, hash_index, entry->index);
+ mutex_unlock(&table->lock);
+ return -EINVAL;
+ }
+ }
+
+ mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
+
+ if (entry->opc_type == NPC_EXACT_OPC_MEM)
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
+ else
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
+
+ /* Update entry fields */
+ ether_addr_copy(entry->mac, new_mac);
+ *seq_id = entry->seq_id;
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
+
+ dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
+ __func__, old_mac, new_mac);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc
+ *
+ * A drop rule is installed per PF. DMAC filters are not supported
+ * for VFs.
+ * Return: 0 upon success
+ */
+
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (!*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = false;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = true;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
+ * @rvu: resource virtualization unit.
+ * @req: Reset request
+ * @rsp: Reset response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ if (rc) {
+ /* TODO: how to handle this error case ? */
+ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
+ __func__, pfvf->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
+ * @rvu: resource virtualization unit.
+ * @req: Update request.
+ * @rsp: Update response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ struct rvu_pfvf *pfvf;
+ u32 seq_id, mcam_idx;
+ u8 old_mac[ETH_ALEN];
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
+ __func__, req->index, req->mac_addr);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
+ if (!entry) {
+ dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
+ mutex_unlock(&table->lock);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
+ }
+ ether_addr_copy(old_mac, entry->mac);
+ seq_id = entry->seq_id;
+ mcam_idx = entry->mcam_idx;
+ mutex_unlock(&table->lock);
+
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ /* This could be a new entry */
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
+ pfvf->mac_addr, pf);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id, true,
+ mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev,
+ "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
+ * @rvu: resource virtualization unit.
+ * @req: Add request.
+ * @rsp: Add response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u32 seq_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, -1, req->hdr.pcifunc);
+
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_del - Delete DMAC filter
+ * @rvu: resource virtualization unit.
+ * @req: Delete request.
+ * @rsp: Delete response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int rc;
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
+ * @rvu: resource virtualization unit.
+ * @req: Set request.
+ * @rsp: Set response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u32 mcam_idx = -1;
+ int rc, nixlf;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = &rvu->pf[pf];
+
+ /* If the table does not have an entry, both the update entry and delete
+ * table entry calls below fail. Those are not failure conditions.
+ */
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pf);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
+ __func__, pfvf->mac_addr, pf);
+ }
+
+ /* find mcam entry if it exists */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
+ if (!rc) {
+ mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
+ __func__, req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev,
+ "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match feature is supported.
+ */
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ bool empty;
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return false;
+
+ mutex_lock(&table->lock);
+ empty = list_empty(&table->lhead_gbl);
+ mutex_unlock(&table->lock);
+
+ return empty;
+}
+
+/**
+ * rvu_npc_exact_disable_feature - Disable feature.
+ * @rvu: resource virtualization unit.
+ */
+void rvu_npc_exact_disable_feature(struct rvu *rvu)
+{
+ rvu->hw->cap.npc_exact_match_enabled = false;
+}
+
+/**
+ * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: PCI func to match.
+ */
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *tmp, *iter;
+ u32 seq_id;
+
+ mutex_lock(&table->lock);
+ list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
+ if (pcifunc != iter->pcifunc)
+ continue;
+
+ seq_id = iter->seq_id;
+ dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
+ pcifunc, seq_id);
+
+ mutex_unlock(&table->lock);
+ rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ mutex_lock(&table->lock);
+ }
+ mutex_unlock(&table->lock);
+}
+
+/**
+ * rvu_npc_exact_init - initialize exact match table
+ * @rvu: resource virtualization unit.
+ *
+ * Initialize HW and SW resources to manage 4way-2K table and fully
+ * associative 32-entry mcam table.
+ * Return: 0 upon success.
+ */
+int rvu_npc_exact_init(struct rvu *rvu)
+{
+ u64 bcast_mcast_val, bcast_mcast_mask;
+ struct npc_exact_table *table;
+ u64 exact_val, exact_mask;
+ u64 chan_val, chan_mask;
+ u8 cgx_id, lmac_id;
+ u32 *drop_mcam_idx;
+ u16 max_lmac_cnt;
+ u64 npc_const3;
+ int table_size;
+ int blkaddr;
+ u16 pcifunc;
+ int err, i;
+ u64 cfg;
+ bool rc;
+
+ /* Read NPC_AF_CONST3 and check whether the exact match
+ * functionality is present.
+ */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check exact match feature is supported */
+ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62))) {
+ dev_info(rvu->dev, "%s: No support for exact match support\n",
+ __func__);
+ return 0;
+ }
+
+ /* Check if kex profile has enabled EXACT match nibble */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
+ dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
+ __func__);
+ return 0;
+ }
+
+ /* Set capability to true */
+ rvu->hw->cap.npc_exact_match_enabled = true;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated memory for exact match table\n", __func__);
+ rvu->hw->table = table;
+
+ /* Read table size, ways and depth */
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+
+ dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+ __func__, table->mem_table.ways, table->cam_table.depth);
+
+ /* Check that the table depth is a power of 2;
+ * (depth & (depth - 1)) is non-zero only when it is not.
+ * TODO: why is __builtin_popcount() not working?
+ */
+ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
+ dev_err(rvu->dev,
+ "%s: NPC exact match 4way_2k table depth(%d) is not a power of 2\n",
+ __func__, table->mem_table.depth);
+ return -EINVAL;
+ }
+
+ table_size = table->mem_table.depth * table->mem_table.ways;
+
+ /* Allocate bitmap for 4way 2K table */
+ table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
+ sizeof(long), GFP_KERNEL);
+ if (!table->mem_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
+
+ /* Allocate bitmap for 32 entry mcam */
+ table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
+
+ if (!table->cam_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
+
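+ /* One unique sequence id per possible entry across the 4-way table and the CAM */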
+ table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
+ table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
+ sizeof(long), GFP_KERNEL);
+
+ if (!table->id_bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
+ __func__, table->tot_ids);
+
+ /* Initialize list heads for npc_exact_table entries.
+ * This entry is used by debugfs to show entries in
+ * exact match table.
+ */
+ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
+ INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
+
+ INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
+ INIT_LIST_HEAD(&table->lhead_gbl);
+
+ mutex_init(&table->lock);
+
+ rvu_exact_config_secret_key(rvu);
+ rvu_exact_config_search_key(rvu);
+
+ rvu_exact_config_table_mask(rvu);
+ rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
+
+ /* - No drop rule for LBK
+ * - Drop rules for SDP and each LMAC.
+ */
+ exact_val = !NPC_EXACT_RESULT_HIT;
+ exact_mask = NPC_EXACT_RESULT_HIT;
+
+ /* nibble - 3 2 1 0
+ * L3B L3M L2B L2M
+ */
+ bcast_mcast_val = 0b0000;
+ bcast_mcast_mask = 0b0011;
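+ /* With mask 0b0011 and value 0b0000 the drop rules match only frames
+ * that are neither L2 broadcast nor L2 multicast, i.e. unicast traffic.
+ */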
+
+ /* Install SDP drop rule */
+ drop_mcam_idx = &table->num_drop_rules;
+
+ max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
+ for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ continue;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
+ __func__, chan_val, chan_mask, *drop_mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Filter rules are only for PF */
+ pcifunc = RVU_PFFUNC(i, 0);
+
+ dev_dbg(rvu->dev,
+ "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
+ __func__, cgx_id, lmac_id, chan_val, chan_mask);
+
+ rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
+ chan_val, chan_mask, pcifunc);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
+ __func__, cgx_id, lmac_id, chan_val);
+ return -EINVAL;
+ }
+
+ err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
+ &table->counter_idx[*drop_mcam_idx],
+ chan_val, chan_mask,
+ exact_val, exact_mask,
+ bcast_mcast_val, bcast_mcast_mask);
+ if (err) {
+ dev_err(rvu->dev,
+ "failed to configure drop rule (cgx=%d lmac=%d)\n",
+ cgx_id, lmac_id);
+ return err;
+ }
+
+ (*drop_mcam_idx)++;
+ }
+
+ dev_info(rvu->dev, "initialized exact match table successfully\n");
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
new file mode 100644
index 000000000000..3efeb09c58de
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_HASH_H
+#define __RVU_NPC_HASH_H
+
+#define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1
+#define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4
+#define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7
+
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+
+#define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \
+ ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+#define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \
+ (((hdr_ofs) << 32) | ((bytesm1) << 16) | \
+ ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \
+ ((ltype_match) << 4) | ((ltype_mask) & 0xF))
+
+#define SET_KEX_LD_HASH(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg)
+
+#define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+
+#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+
+struct npc_mcam_kex_hash {
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */
+ u64 hash[NPC_MAX_INTF][NPC_MAX_HASH];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+} __packed;
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask);
+void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+ u64 *secret_key, u8 intf, u8 hash_idx);
+
+static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ .lid_lt_ld_hash_en = {
+ [NIX_INTF_RX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+ },
+
+ .hash = {
+ [NIX_INTF_RX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+
+ [NIX_INTF_TX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+ },
+
+ .hash_mask = {
+ [NIX_INTF_RX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+ },
+
+ .hash_ctrl = {
+ [NIX_INTF_RX] = {
+ [0] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits the offset. */
+ [1] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits the offset. */
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits the offset. */
+ [1] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits the offset. */
+ },
+ },
+};
+
+/* If exact match table support is enabled, enable drop rules */
+#define NPC_MCAM_DROP_RULE_MAX 30
+#define NPC_MCAM_SDP_DROP_RULE_IDX 0
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+enum npc_exact_opc_type {
+ NPC_EXACT_OPC_MEM,
+ NPC_EXACT_OPC_CAM,
+};
+
+struct npc_exact_table_entry {
+ struct list_head list;
+ struct list_head glist;
+ u32 seq_id; /* Sequence number of entry */
+ u32 index; /* Mem table or cam table index */
+ u32 mcam_idx; /* MCAM index; valid only when "cmd" is false */
+ enum npc_exact_opc_type opc_type;
+ u16 chan;
+ u16 pcifunc;
+ u8 ways;
+ u8 mac[ETH_ALEN];
+ u8 ctype;
+ u8 cgx_id;
+ u8 lmac_id;
+ bool cmd; /* Entry was added via an ethtool command */
+};
+
+struct npc_exact_table {
+ struct mutex lock; /* entries update lock */
+ unsigned long *id_bmap;
+ int num_drop_rules;
+ u32 tot_ids;
+ u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX];
+ u16 counter_idx[NPC_MCAM_DROP_RULE_MAX];
+ bool promisc_mode[NPC_MCAM_DROP_RULE_MAX];
+ struct {
+ int ways;
+ int depth;
+ unsigned long *bmap;
+ u64 mask; /* Masks before hash calculation */
+ u16 hash_mask; /* 11-bit hash mask */
+ u16 hash_offset; /* 11-bit hash offset */
+ } mem_table;
+
+ struct {
+ int depth;
+ unsigned long *bmap;
+ } cam_table;
+
+ struct {
+ bool valid;
+ u16 chan_val;
+ u16 chan_mask;
+ u16 pcifunc;
+ u8 drop_rule_idx;
+ } drop_rule_map[NPC_MCAM_DROP_RULE_MAX];
+
+#define NPC_EXACT_TBL_MAX_WAYS 4
+
+ struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS];
+ int mem_tbl_entry_cnt;
+
+ struct list_head lhead_cam_tbl_entry;
+ int cam_tbl_entry_cnt;
+
+ struct list_head lhead_gbl;
+};
+
+bool rvu_npc_exact_has_match_table(struct rvu *rvu);
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
+int rvu_npc_exact_init(struct rvu *rvu);
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_disable_feature(struct rvu *rvu);
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx);
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc);
+#endif /* RVU_NPC_HASH_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 22cd751613cd..77a9ade91f3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -565,7 +565,13 @@
#define NPC_AF_PCK_DEF_OIP4 (0x00620)
#define NPC_AF_PCK_DEF_OIP6 (0x00630)
#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3)
+#define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3)
#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4)
+#define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3)
#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
@@ -599,6 +605,15 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3)
+#define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_RESULT_CTL(a)(0x680 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3)
+
#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
u64 offset; \
\
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ce2766317c0b..e795f9ee76dd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -314,8 +314,8 @@ struct otx2_flow_config {
#define OTX2_VF_VLAN_TX_INDEX 1
u16 max_flows;
u8 dmacflt_max_flows;
- u8 *bmap_to_dmacindex;
- unsigned long dmacflt_bmap;
+ u32 *bmap_to_dmacindex;
+ unsigned long *dmacflt_bmap;
struct list_head flow_list;
};
@@ -895,9 +895,9 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
index 2ec800f741d8..80d853b343f9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -8,7 +8,7 @@
#include "otx2_common.h"
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
- u8 *dmac_index)
+ u32 *dmac_index)
{
struct cgx_mac_addr_add_req *req;
struct cgx_mac_addr_add_rsp *rsp;
@@ -35,9 +35,10 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
struct cgx_mac_addr_set_or_get *req;
+ struct cgx_mac_addr_set_or_get *rsp;
int err;
mutex_lock(&pf->mbox.lock);
@@ -48,16 +49,31 @@ static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
return -ENOMEM;
}
+ req->index = *dmac_index;
+
ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_set_or_get *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *dmac_index = rsp->index;
+out:
mutex_unlock(&pf->mbox.lock);
return err;
}
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
- u8 *dmacindex;
+ u32 *dmacindex;
/* Store dmacindex returned by CGX/RPM driver which will
* be used for macaddr update/remove
@@ -65,13 +81,13 @@ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_add_pfmac(pf);
+ return otx2_dmacflt_add_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_add(pf, mac, dmacindex);
}
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
- u8 dmac_index)
+ u32 dmac_index)
{
struct cgx_mac_addr_del_req *req;
int err;
@@ -91,9 +107,9 @@ static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
- struct msg_req *req;
+ struct cgx_mac_addr_reset_req *req;
int err;
mutex_lock(&pf->mbox.lock);
@@ -102,6 +118,7 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
+ req->index = dmac_index;
err = otx2_sync_mbox_msg(&pf->mbox);
@@ -110,12 +127,12 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
}
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
- u8 bit_pos)
+ u32 bit_pos)
{
- u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_remove_pfmac(pf);
+ return otx2_dmacflt_remove_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_remove(pf, mac, dmacindex);
}
@@ -144,6 +161,12 @@ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
rsp = (struct cgx_max_dmac_entries_get_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
out:
@@ -151,9 +174,10 @@ out:
return err;
}
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
struct cgx_mac_addr_update_req *req;
+ struct cgx_mac_addr_update_rsp *rsp;
int rc;
mutex_lock(&pf->mbox.lock);
@@ -167,8 +191,19 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
ether_addr_copy(req->mac_addr, mac);
req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ /* Check the response and update the saved dmac index */
+
rc = otx2_sync_mbox_msg(&pf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_update_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
+
+out:
mutex_unlock(&pf->mbox.lock);
return rc;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 2dd192b5e4e0..709fc0114fbd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,7 +18,7 @@ struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
u32 location;
- u16 entry;
+ u32 entry;
bool is_vf;
u8 rss_ctx_id;
#define DMAC_FILTER_RULE BIT(0)
@@ -232,6 +232,9 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
return 0;
}
+/* TODO : revisit on size */
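+/* Presumably sized to cover the AF exact match table: 4-way x 2K entries
+ * plus the 32-entry CAM.
+ */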
+#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
+
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg;
@@ -242,6 +245,12 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
if (!pfvf->flow_cfg)
return -ENOMEM;
+ pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pfvf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
flow_cfg->max_flows = 0;
@@ -259,6 +268,12 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->flow_cfg)
return -ENOMEM;
+ pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
/* Allocate bare minimum number of MCAM entries needed for
@@ -284,7 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->flow_cfg->bmap_to_dmacindex =
- devm_kzalloc(pf->dev, sizeof(u8) *
+ devm_kzalloc(pf->dev, sizeof(u32) *
pf->flow_cfg->dmacflt_max_flows,
GFP_KERNEL);
@@ -355,7 +370,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
- if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
+ if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -438,7 +453,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
- !bitmap_empty(&flow_cfg->dmacflt_bmap,
+ !bitmap_empty(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
@@ -1010,7 +1025,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
otx2_add_flow_to_list(pfvf, pf_mac);
pfvf->flow_cfg->nr_flows++;
- set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ set_bit(0, pfvf->flow_cfg->dmacflt_bmap);
return 0;
}
@@ -1064,7 +1079,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
- if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ if (bitmap_full(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows)) {
netdev_warn(pfvf->netdev,
"Can't insert the rule %d as max allowed dmac filters are %d\n",
@@ -1078,17 +1093,17 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
}
/* Install PF mac address to DMAC filter list */
- if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ if (!test_bit(0, flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
flow->rule_type |= DMAC_FILTER_RULE;
- flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
- set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ set_bit(flow->entry, flow_cfg->dmacflt_bmap);
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
@@ -1154,11 +1169,12 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
0);
- clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
found = true;
} else {
ether_addr_copy(eth_hdr->h_dest,
pfvf->netdev->dev_addr);
+
otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
}
break;
@@ -1194,12 +1210,12 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
flow->entry);
- clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
/* If all dmac filters are removed delete macfilter with
* interface mac address and configure CGX/RPM block in
* promiscuous mode
*/
- if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ if (bitmap_weight(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9106c359e64c..9376d0e62914 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1120,7 +1120,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
+ if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 3baeafc40807..a18e8efd0f1e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -624,7 +624,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->subdc = NIX_SUBDC_EXT;
if (skb_shinfo(skb)->gso_size) {
ext->lso = 1;
- ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_sb = skb_tcp_all_headers(skb);
ext->lso_mps = skb_shinfo(skb)->gso_size;
/* Only TSOv4 and TSOv6 GSO offloads are supported */
@@ -931,7 +931,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
* be correctly modified, hence don't offload such TSO segments.
*/
- payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ payload_len = skb->len - skb_tcp_all_headers(skb);
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index c88e8a436029..fbe62bbfb789 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -21,7 +21,7 @@
#define OTX2_HEAD_ROOM OTX2_ALIGN
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
-#define OTX2_MIN_MTU 64
+#define OTX2_MIN_MTU 60
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b6f20e2034c6..f2f7663c3d10 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -8,6 +8,7 @@ config PRESTERA
depends on NET_SWITCHDEV && VLAN_8021Q
depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
+ select PHYLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 6f754ae2a584..2f84d0fb4094 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -7,6 +7,7 @@
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/phylink.h>
#include <net/devlink.h>
#include <uapi/linux/if_ether.h>
@@ -20,6 +21,26 @@ struct prestera_fw_rev {
u16 sub;
};
+struct prestera_flood_domain {
+ struct prestera_switch *sw;
+ struct list_head flood_domain_port_list;
+ u32 idx;
+};
+
+struct prestera_mdb_entry {
+ struct prestera_switch *sw;
+ struct prestera_flood_domain *flood_domain;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct prestera_flood_domain_port {
+ struct prestera_flood_domain *flood_domain;
+ struct net_device *dev;
+ struct list_head flood_domain_port_node;
+ u16 vid;
+};
+
struct prestera_port_stats {
u64 good_octets_received;
u64 bad_octets_received;
@@ -72,6 +93,7 @@ struct prestera_lag {
struct prestera_flow_block;
struct prestera_port_mac_state {
+ bool valid;
u32 mode;
u32 speed;
bool oper;
@@ -107,7 +129,8 @@ struct prestera_port_phy_config {
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
- struct prestera_flow_block *flow_block;
+ struct prestera_flow_block *ingress_flow_block;
+ struct prestera_flow_block *egress_flow_block;
struct devlink_port dl_port;
struct list_head lag_member;
struct prestera_lag *lag;
@@ -130,6 +153,13 @@ struct prestera_port {
struct prestera_port_phy_config cfg_phy;
struct prestera_port_mac_state state_mac;
struct prestera_port_phy_state state_phy;
+
+ struct phylink_config phy_config;
+ struct phylink *phy_link;
+ struct phylink_pcs phylink_pcs;
+
+ /* protects state_mac */
+ spinlock_t state_mac_lock;
};
struct prestera_device {
@@ -270,6 +300,7 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct device_node *np;
struct prestera_router *router;
struct prestera_lag *lags;
struct prestera_counter *counter;
@@ -320,6 +351,8 @@ void prestera_router_fini(struct prestera_switch *sw);
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+struct prestera_switch *prestera_switch_get(struct net_device *dev);
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg);
@@ -330,6 +363,10 @@ struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
void prestera_queue_work(struct work_struct *work);
+int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
@@ -337,9 +374,30 @@ bool prestera_netdev_check(const struct net_device *dev);
int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
bool prestera_port_is_lag_member(const struct prestera_port *port);
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id);
struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
u16 prestera_port_lag_id(const struct prestera_port *port);
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid);
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry);
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw);
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain);
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid);
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port);
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid);
+
#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 3a141f2db812..3d4b85f2d541 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -61,6 +61,7 @@ struct prestera_acl_ruleset {
u32 index;
u16 pcl_id;
bool offload;
+ bool ingress;
};
struct prestera_acl_vtcam {
@@ -70,6 +71,7 @@ struct prestera_acl_vtcam {
u32 id;
bool is_keymask_set;
u8 lookup;
+ u8 direction;
};
static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
@@ -93,23 +95,36 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
.automatic_shrinking = true,
};
-int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
{
- static const u32 client_map[] = {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
+ static const u32 ingress_client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2
};
- if (chain_index >= ARRAY_SIZE(client_map))
+ if (!ingress) {
+ /* prestera supports only one chain on egress */
+ if (chain_index > 0)
+ return -EINVAL;
+
+ *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
+ return 0;
+ }
+
+ if (chain_index >= ARRAY_SIZE(ingress_client_map))
return -EINVAL;
- *client = client_map[chain_index];
+ *client = ingress_client_map[chain_index];
return 0;
}
-static bool prestera_acl_chain_is_supported(u32 chain_index)
+static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress)
{
+ if (!ingress)
+ /* prestera supports only one chain on egress */
+ return chain_index == 0;
+
return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
}
@@ -122,7 +137,7 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
u32 uid = 0;
int err;
- if (!prestera_acl_chain_is_supported(chain_index))
+ if (!prestera_acl_chain_is_supported(chain_index, block->ingress))
return ERR_PTR(-EINVAL);
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
@@ -130,6 +145,7 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
+ ruleset->ingress = block->ingress;
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
@@ -172,13 +188,18 @@ int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl_iface iface;
u32 vtcam_id;
+ int dir;
int err;
+ dir = ruleset->ingress ?
+ PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS;
+
if (ruleset->offload)
return -EEXIST;
err = prestera_acl_vtcam_id_get(ruleset->acl,
ruleset->ht_key.chain_index,
+ dir,
ruleset->keymask, &vtcam_id);
if (err)
goto err_vtcam_create;
@@ -719,7 +740,7 @@ vtcam_found:
return 0;
}
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id)
{
struct prestera_acl_vtcam *vtcam;
@@ -731,7 +752,8 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
* fine for now
*/
list_for_each_entry(vtcam, &acl->vtcam_list, list) {
- if (lookup != vtcam->lookup)
+ if (lookup != vtcam->lookup ||
+ dir != vtcam->direction)
continue;
if (!keymask && !vtcam->is_keymask_set) {
@@ -752,7 +774,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return -ENOMEM;
err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
- PRESTERA_HW_VTCAM_DIR_INGRESS);
+ dir);
if (err) {
kfree(vtcam);
@@ -765,6 +787,7 @@ int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
return 0;
}
+ vtcam->direction = dir;
vtcam->id = new_vtcam_id;
vtcam->lookup = lookup;
if (keymask) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index f963e1e0c0f0..03fc5b9dc925 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -199,9 +199,9 @@ void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
-int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
-int prestera_acl_chain_to_client(u32 chain_index, u32 *client);
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 40d5b89573bb..1da7ff889417 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -521,6 +521,9 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
ecmd->base.speed = SPEED_UNKNOWN;
ecmd->base.duplex = DUPLEX_UNKNOWN;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_get(port->phy_link, ecmd);
+
ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
@@ -648,6 +651,9 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev,
u8 adver_fec;
int err;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_set(port->phy_link, ecmd);
+
err = prestera_port_type_set(ecmd, port);
if (err)
return err;
@@ -782,28 +788,6 @@ static int prestera_ethtool_nway_reset(struct net_device *dev)
return -EINVAL;
}
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt)
-{
- struct prestera_port_mac_state *smac = &port->state_mac;
-
- smac->oper = evt->data.mac.oper;
-
- if (smac->oper) {
- smac->mode = evt->data.mac.mode;
- smac->speed = evt->data.mac.speed;
- smac->duplex = evt->data.mac.duplex;
- smac->fc = evt->data.mac.fc;
- smac->fec = evt->data.mac.fec;
- } else {
- smac->mode = PRESTERA_MAC_MODE_MAX;
- smac->speed = SPEED_UNKNOWN;
- smac->duplex = DUPLEX_UNKNOWN;
- smac->fc = 0;
- smac->fec = 0;
- }
-}
-
const struct ethtool_ops prestera_ethtool_ops = {
.get_drvinfo = prestera_ethtool_get_drvinfo,
.get_link_ksettings = prestera_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
index 9eb18e99dea6..bd5600886bc6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -11,7 +11,4 @@ struct prestera_port;
extern const struct ethtool_ops prestera_ethtool_ops;
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt);
-
#endif /* _PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index 05c3ad98eba9..2262693bd5cf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -75,7 +75,9 @@ static void prestera_flow_block_destroy(void *cb_priv)
}
static struct prestera_flow_block *
-prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
+prestera_flow_block_create(struct prestera_switch *sw,
+ struct net *net,
+ bool ingress)
{
struct prestera_flow_block *block;
@@ -87,6 +89,7 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
+ block->ingress = ingress;
return block;
}
@@ -165,7 +168,8 @@ static int prestera_flow_block_unbind(struct prestera_flow_block *block,
static struct prestera_flow_block *
prestera_flow_block_get(struct prestera_switch *sw,
struct flow_block_offload *f,
- bool *register_block)
+ bool *register_block,
+ bool ingress)
{
struct prestera_flow_block *block;
struct flow_block_cb *block_cb;
@@ -173,7 +177,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_flow_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net, ingress);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -209,7 +213,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -217,7 +221,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
bool register_block;
int err;
- block = prestera_flow_block_get(sw, f, &register_block);
+ block = prestera_flow_block_get(sw, f, &register_block, ingress);
if (IS_ERR(block))
return PTR_ERR(block);
@@ -232,7 +236,11 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
}
- port->flow_block = block;
+ if (ingress)
+ port->ingress_flow_block = block;
+ else
+ port->egress_flow_block = block;
+
return 0;
err_block_bind:
@@ -242,7 +250,7 @@ err_block_bind:
}
static void prestera_setup_flow_block_unbind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -266,24 +274,38 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
list_del(&block_cb->driver_list);
}
error:
- port->flow_block = NULL;
+ if (ingress)
+ port->ingress_flow_block = NULL;
+ else
+ port->egress_flow_block = NULL;
}
-int prestera_flow_block_setup(struct prestera_port *port,
- struct flow_block_offload *f)
+static int prestera_setup_flow_block_clsact(struct prestera_port *port,
+ struct flow_block_offload *f,
+ bool ingress)
{
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
f->driver_block_list = &prestera_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return prestera_setup_flow_block_bind(port, f);
+ return prestera_setup_flow_block_bind(port, f, ingress);
case FLOW_BLOCK_UNBIND:
- prestera_setup_flow_block_unbind(port, f);
+ prestera_setup_flow_block_unbind(port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
}
}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return prestera_setup_flow_block_clsact(port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return prestera_setup_flow_block_clsact(port, f, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 6550278b166a..0c9e13263261 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -23,6 +23,7 @@ struct prestera_flow_block {
struct flow_block_cb *block_cb;
struct list_head template_list;
unsigned int rule_count;
+ bool ingress;
};
int prestera_flow_block_setup(struct prestera_port *port,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 4d93ad6a284c..19d3b55c578e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -79,7 +79,7 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
/* setup counter first */
rule->re_arg.count.valid = true;
- err = prestera_acl_chain_to_client(chain_index,
+ err = prestera_acl_chain_to_client(chain_index, block->ingress,
&rule->re_arg.count.client);
if (err)
return err;
@@ -116,7 +116,7 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
rule->re_arg.police.rate =
act->police.rate_bytes_ps;
rule->re_arg.police.burst = act->police.burst;
- rule->re_arg.police.ingress = true;
+ rule->re_arg.police.ingress = block->ingress;
break;
case FLOW_ACTION_GOTO:
err = prestera_flower_parse_goto_action(block, rule,
@@ -138,7 +138,8 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
struct flow_cls_offload *f,
struct prestera_flow_block *block)
-{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct prestera_acl_match *r_match = &rule->re_key.match;
struct prestera_port *port;
struct net_device *ingress_dev;
@@ -178,13 +179,13 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
rule_match_set(r_match->mask, SYS_DEV, mask);
return 0;
-
}
static int prestera_flower_parse(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_cls_offload *f)
-{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = f_rule->match.dissector;
struct prestera_acl_match *r_match = &rule->re_key.match;
__be16 n_proto_mask = 0;
@@ -202,6 +203,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ICMP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
@@ -301,6 +303,29 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
}
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
+ struct flow_match_ports_range match;
+ __be32 tp_key, tp_mask;
+
+ flow_rule_match_ports_range(f_rule, &match);
+
+ /* src port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.src) |
+ (ntohs(match.key->tp_max.src) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.src) |
+ (ntohs(match.mask->tp_max.src) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);
+
+ /* dst port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.dst) |
+ (ntohs(match.key->tp_max.dst) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
+ (ntohs(match.mask->tp_max.dst) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
+ }
+
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
@@ -397,7 +422,6 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
prestera_acl_rule_destroy(rule);
}
prestera_acl_ruleset_put(ruleset);
-
}
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 79fd3cac539d..962d7e0c0cb5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -60,6 +60,14 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703,
+
+ PRESTERA_CMD_TYPE_MDB_CREATE = 0x704,
+ PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705,
+
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
@@ -185,6 +193,12 @@ struct prestera_fw_event_handler {
void *arg;
};
+enum {
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2,
+};
+
struct prestera_msg_cmd {
__le32 type;
};
@@ -627,6 +641,57 @@ struct prestera_msg_event_fdb {
u8 dest_type;
};
+struct prestera_msg_flood_domain_create_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_flood_domain_create_resp {
+ struct prestera_msg_ret ret;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+};
+
+struct prestera_msg_flood_domain_ports_reset_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
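+/* Per-port element of the ports_set message: describes either a regular
+ * port (dev_num/port_num) or a LAG (lag_id), selected by port_type.
+ */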
+struct prestera_msg_flood_domain_port {
+ union {
+ struct {
+ __le32 port_num;
+ __le32 dev_num;
+ };
+ __le16 lag_id;
+ };
+ __le16 vid;
+ __le16 port_type;
+};
+
+struct prestera_msg_mdb_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_mdb_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
static void prestera_hw_build_tests(void)
{
/* check requests */
@@ -654,10 +719,17 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -1531,7 +1603,7 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1549,7 +1621,7 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1567,56 +1639,6 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .param = {
- .flood = flood,
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val)
-{
- int err;
-
- if (port->sw->dev->fw_rev.maj <= 2) {
- if (!(mask & BR_FLOOD))
- return 0;
-
- return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD);
- }
-
- if (mask & BR_FLOOD) {
- err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD);
- if (err)
- goto err_uc_flood;
- }
-
- if (mask & BR_MCAST_FLOOD) {
- err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD);
- if (err)
- goto err_mc_flood;
- }
-
- return 0;
-
-err_mc_flood:
- prestera_hw_port_mc_flood_set(port, 0);
-err_uc_flood:
- if (mask & BR_FLOOD)
- prestera_hw_port_uc_flood_set(port, 0);
-
- return err;
-}
-
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -2244,3 +2266,133 @@ int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
&req.cmd, sizeof(req));
}
+
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_create_resp resp;
+ struct prestera_msg_flood_domain_create_req req;
+ int err;
+
+ err = prestera_cmd_ret(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd,
+ sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ domain->idx = __le32_to_cpu(resp.flood_domain_idx);
+
+ return 0;
+}
+
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ struct prestera_msg_flood_domain_ports_set_req *req;
+ struct prestera_msg_flood_domain_port *ports;
+ struct prestera_switch *sw = domain->sw;
+ struct prestera_port *port;
+ u32 ports_num = 0;
+ int buf_size;
+ void *buff;
+ u16 lag_id;
+ int err;
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node)
+ ports_num++;
+
+ if (!ports_num)
+ return -EINVAL;
+
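+ /* Variable-length request: fixed header followed by one entry per port */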
+ buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
+
+ buff = kmalloc(buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ ports = buff + sizeof(*req);
+
+ req->flood_domain_idx = __cpu_to_le32(domain->idx);
+ req->ports_num = __cpu_to_le32(ports_num);
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node) {
+ if (netif_is_lag_master(flood_domain_port->dev)) {
+ if (prestera_lag_id(sw, flood_domain_port->dev,
+ &lag_id)) {
+ kfree(buff);
+ return -EINVAL;
+ }
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
+ ports->lag_id = __cpu_to_le16(lag_id);
+ } else {
+ port = prestera_port_dev_lower_find(flood_domain_port->dev);
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT);
+ ports->dev_num = __cpu_to_le32(port->dev_id);
+ ports->port_num = __cpu_to_le32(port->hw_id);
+ }
+
+ ports->vid = __cpu_to_le16(flood_domain_port->vid);
+
+ ports++;
+ }
+
+ err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
+ &req->cmd, buf_size);
+
+ kfree(buff);
+
+ return err;
+}
+
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_ports_reset_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_create_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd,
+ sizeof(req));
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 579d9ba23ffc..56e043146dd2 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -123,9 +123,10 @@ enum prestera_hw_vtcam_direction_t {
};
enum {
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1,
- PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
};
struct prestera_switch;
@@ -143,6 +144,8 @@ struct prestera_acl_hw_action_info;
struct prestera_acl_iface;
struct prestera_counter_stats;
struct prestera_iface;
+struct prestera_flood_domain;
+struct prestera_mdb_entry;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -178,8 +181,8 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *stats);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val);
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -301,4 +304,13 @@ int prestera_hw_policer_release(struct prestera_switch *sw,
int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
u32 policer_id, u64 cir, u32 cbs);
+/* Flood domain / MDB API */
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain);
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb);
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb);
+
#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 3952fdcc9240..ede3e53b9790 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
#include "prestera.h"
#include "prestera_hw.h"
@@ -35,6 +36,21 @@ void prestera_queue_work(struct work_struct *work)
queue_work(prestera_owq, work);
}
+int prestera_port_learning_set(struct prestera_port *port, bool learn)
+{
+ return prestera_hw_port_learning_set(port, learn);
+}
+
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_uc_flood_set(port, flood);
+}
+
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_mc_flood_set(port, flood);
+}
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
enum prestera_accept_frm_type frm_type;
@@ -91,6 +107,14 @@ struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
return port;
}
+struct prestera_switch *prestera_switch_get(struct net_device *dev)
+{
+ struct prestera_port *port;
+
+ port = prestera_port_dev_lower_find(dev);
+ return port ? port->sw : NULL;
+}
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg)
{
@@ -119,18 +143,24 @@ static int prestera_port_open(struct net_device *dev)
struct prestera_port_mac_config cfg_mac;
int err = 0;
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
- err = prestera_port_cfg_mac_read(port, &cfg_mac);
- if (!err) {
- cfg_mac.admin = true;
- err = prestera_port_cfg_mac_write(port, &cfg_mac);
- }
+ if (port->phy_link) {
+ phylink_start(port->phy_link);
} else {
- port->cfg_phy.admin = true;
- err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port,
+ &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
}
netif_start_queue(dev);
@@ -146,23 +176,259 @@ static int prestera_port_close(struct net_device *dev)
netif_stop_queue(dev);
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ if (port->phy_link) {
+ phylink_stop(port->phy_link);
+ phylink_disconnect_phy(port->phy_link);
err = prestera_port_cfg_mac_read(port, &cfg_mac);
if (!err) {
cfg_mac.admin = false;
prestera_port_cfg_mac_write(port, &cfg_mac);
}
} else {
- port->cfg_phy.admin = false;
- err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
}
return err;
}
+static void
+prestera_port_mac_state_cache_read(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ *state = port->state_mac;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static void
+prestera_port_mac_state_cache_write(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ port->state_mac = *state;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct prestera_port, phylink_pcs);
+}
+
+static void prestera_mac_config(struct phylink_config *config,
+ unsigned int an_mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void prestera_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(ndev);
+ struct prestera_port_mac_state state_mac;
+
+ /* Invalidate. Parameters will update on next link event. */
+ memset(&state_mac, 0, sizeof(state_mac));
+ state_mac.valid = false;
+ prestera_port_mac_state_cache_write(port, &state_mac);
+}
+
+static void prestera_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+}
+
+static struct phylink_pcs *
+prestera_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *dev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->phylink_pcs;
+}
+
+static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct prestera_port *port = container_of(pcs, struct prestera_port,
+ phylink_pcs);
+ struct prestera_port_mac_state smac;
+
+ prestera_port_mac_state_cache_read(port, &smac);
+
+ if (smac.valid) {
+ state->link = smac.oper ? 1 : 0;
+ /* AN is complete when the port is up */
+ state->an_complete = (smac.oper && port->autoneg) ? 1 : 0;
+ state->speed = smac.speed;
+ state->duplex = smac.duplex;
+ } else {
+ state->link = 0;
+ state->an_complete = 0;
+ }
+}
+
+static int prestera_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct prestera_port *port = prestera_pcs_to_port(pcs);
+ struct prestera_port_mac_config cfg_mac;
+ int err;
+
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (err)
+ return err;
+
+ cfg_mac.admin = true;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ cfg_mac.speed = SPEED_10000;
+ cfg_mac.inband = 0;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cfg_mac.speed = SPEED_2500;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cfg_mac.inband = 1;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ default:
+ cfg_mac.speed = SPEED_1000;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X;
+ break;
+ }
+
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void prestera_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ /* TODO: add 1000baseX AN restart support
+ * (Currently the FW has no support for 1000baseX AN restart, but it will
+ * in the future, so for now this function stays empty.)
+ */
+}
+
+static const struct phylink_mac_ops prestera_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = prestera_mac_select_pcs,
+ .mac_config = prestera_mac_config,
+ .mac_link_down = prestera_mac_link_down,
+ .mac_link_up = prestera_mac_link_up,
+};
+
+static const struct phylink_pcs_ops prestera_pcs_ops = {
+ .pcs_get_state = prestera_pcs_get_state,
+ .pcs_config = prestera_pcs_config,
+ .pcs_an_restart = prestera_pcs_an_restart,
+};
+
+static int prestera_port_sfp_bind(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct device_node *ports, *node;
+ struct fwnode_handle *fwnode;
+ struct phylink *phy_link;
+ int err = 0;
+
+ if (!sw->np)
+ return 0;
+
+ ports = of_find_node_by_name(sw->np, "ports");
+
+ for_each_child_of_node(ports, node) {
+ int num;
+
+ err = of_property_read_u32(node, "prestera,port-num", &num);
+ if (err) {
+ dev_err(sw->dev->dev,
+ "device node %pOF has no valid reg property: %d\n",
+ node, err);
+ goto out;
+ }
+
+ if (port->fp_id != num)
+ continue;
+
+ port->phylink_pcs.ops = &prestera_pcs_ops;
+
+ port->phy_config.dev = &port->dev->dev;
+ port->phy_config.type = PHYLINK_NETDEV;
+
+ fwnode = of_fwnode_handle(node);
+
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phy_config.supported_interfaces);
+
+ port->phy_config.mac_capabilities =
+ MAC_1000 | MAC_2500FD | MAC_10000FD;
+
+ phy_link = phylink_create(&port->phy_config, fwnode,
+ PHY_INTERFACE_MODE_INTERNAL,
+ &prestera_mac_ops);
+ if (IS_ERR(phy_link)) {
+ netdev_err(port->dev, "failed to create phylink\n");
+ err = PTR_ERR(phy_link);
+ goto out;
+ }
+
+ port->phy_link = phy_link;
+ break;
+ }
+
+out:
+ of_node_put(ports);
+ return err;
+}
+
+static int prestera_port_sfp_unbind(struct prestera_port *port)
+{
+ if (port->phy_link)
+ phylink_destroy(port->phy_link);
+
+ return 0;
+}
+
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -343,6 +609,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
port->id = id;
port->sw = sw;
+ spin_lock_init(&port->state_mac_lock);
+
err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
&port->fp_id);
if (err) {
@@ -357,8 +625,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
+ SET_NETDEV_DEV(dev, sw->dev->dev);
- netif_carrier_off(dev);
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP)
+ netif_carrier_off(dev);
dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
dev->min_mtu = sw->mtu_min;
@@ -409,7 +679,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
cfg_mac.admin = false;
cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
}
- cfg_mac.inband = false;
+ cfg_mac.inband = 0;
cfg_mac.speed = 0;
cfg_mac.duplex = DUPLEX_UNKNOWN;
cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
@@ -451,8 +721,13 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
prestera_devlink_port_set(port);
+ err = prestera_port_sfp_bind(port);
+ if (err)
+ goto err_sfp_bind;
+
return 0;
+err_sfp_bind:
err_register_netdev:
prestera_port_list_del(port);
err_port_init:
@@ -498,8 +773,10 @@ static int prestera_create_ports(struct prestera_switch *sw)
return 0;
err_port_create:
- list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list) {
+ prestera_port_sfp_unbind(port);
prestera_port_destroy(port);
+ }
return err;
}
@@ -507,25 +784,47 @@ err_port_create:
static void prestera_port_handle_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
+ struct prestera_port_mac_state smac;
+ struct prestera_port_event *pevt;
struct delayed_work *caching_dw;
struct prestera_port *port;
- port = prestera_find_port(sw, evt->port_evt.port_id);
- if (!port || !port->dev)
- return;
-
- caching_dw = &port->cached_hw_stats.caching_dw;
-
- prestera_ethtool_port_state_changed(port, &evt->port_evt);
-
if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ pevt = &evt->port_evt;
+ port = prestera_find_port(sw, pevt->port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
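+ /* For phylink-driven ports the MAC state is cached for the PCS
+ * callbacks and reported via phylink_mac_change(); the carrier is
+ * then managed by phylink instead of directly below.
+ */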
+ if (port->phy_link) {
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
+ phylink_mac_change(port->phy_link, true);
+ } else {
+ phylink_mac_change(port->phy_link, false);
+ }
+ prestera_port_mac_state_cache_write(port, &smac);
+ }
+
if (port->state_mac.oper) {
- netif_carrier_on(port->dev);
+ if (!port->phy_link)
+ netif_carrier_on(port->dev);
+
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
} else if (netif_running(port->dev) &&
netif_carrier_ok(port->dev)) {
- netif_carrier_off(port->dev);
+ if (!port->phy_link)
+ netif_carrier_off(port->dev);
+
if (delayed_work_pending(caching_dw))
cancel_delayed_work(caching_dw);
}
@@ -548,19 +847,20 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
struct device_node *base_mac_np;
- struct device_node *np;
- int ret;
+ int ret = 0;
- np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
- base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+ if (sw->np) {
+ base_mac_np = of_parse_phandle(sw->np, "base-mac-provider", 0);
+ if (base_mac_np) {
+ ret = of_get_mac_address(base_mac_np, sw->base_mac);
+ of_node_put(base_mac_np);
+ }
+ }
- ret = of_get_mac_address(base_mac_np, sw->base_mac);
- if (ret) {
+ if (!is_valid_ether_addr(sw->base_mac) || ret) {
eth_random_addr(sw->base_mac);
dev_info(prestera_dev(sw), "using random base mac address\n");
}
- of_node_put(base_mac_np);
- of_node_put(np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
@@ -585,6 +885,30 @@ static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
return NULL;
}
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id)
+{
+ struct prestera_lag *lag;
+ int free_id = -1;
+ int id;
+
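+ /* Reuse the LAG ID already bound to this upper device, otherwise
+ * take the first free slot.
+ */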
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = prestera_lag_by_id(sw, id);
+ if (lag->member_count) {
+ if (lag->dev == lag_dev) {
+ *lag_id = id;
+ return 0;
+ }
+ } else if (free_id < 0) {
+ free_id = id;
+ }
+ }
+ if (free_id < 0)
+ return -ENOSPC;
+ *lag_id = free_id;
+ return 0;
+}
+
static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
struct net_device *lag_dev)
{
@@ -876,6 +1200,150 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
return notifier_from_errno(err);
}
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_flood_domain *flood_domain;
+ struct prestera_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ flood_domain = prestera_flood_domain_create(sw);
+ if (!flood_domain)
+ goto err_flood_domain_create;
+
+ mdb_entry->sw = sw;
+ mdb_entry->vid = vid;
+ mdb_entry->flood_domain = flood_domain;
+ ether_addr_copy(mdb_entry->addr, addr);
+
+ if (prestera_hw_mdb_create(mdb_entry))
+ goto err_mdb_hw_create;
+
+ return mdb_entry;
+
+err_mdb_hw_create:
+ prestera_flood_domain_destroy(flood_domain);
+err_flood_domain_create:
+ kfree(mdb_entry);
+err_mdb_alloc:
+ return NULL;
+}
+
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry)
+{
+ prestera_hw_mdb_destroy(mdb_entry);
+ prestera_flood_domain_destroy(mdb_entry->flood_domain);
+ kfree(mdb_entry);
+}
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw)
+{
+ struct prestera_flood_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->sw = sw;
+
+ if (prestera_hw_flood_domain_create(domain)) {
+ kfree(domain);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&domain->flood_domain_port_list);
+
+ return domain;
+}
+
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain)
+{
+ WARN_ON(!list_empty(&flood_domain->flood_domain_port_list));
+ WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain));
+ kfree(flood_domain);
+}
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ bool is_first_port_in_list = false;
+ int err;
+
+ flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL);
+ if (!flood_domain_port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ flood_domain_port->vid = vid;
+
+ if (list_empty(&flood_domain->flood_domain_port_list))
+ is_first_port_in_list = true;
+
+ list_add(&flood_domain_port->flood_domain_port_node,
+ &flood_domain->flood_domain_port_list);
+
+ flood_domain_port->flood_domain = flood_domain;
+ flood_domain_port->dev = dev;
+
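+ /* If ports are already programmed in HW, clear the HW list first;
+ * it is re-written below from the full software list.
+ */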
+ if (!is_first_port_in_list) {
+ err = prestera_hw_flood_domain_ports_reset(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+ }
+
+ err = prestera_hw_flood_domain_ports_set(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+
+ return 0;
+
+err_prestera_mdb_port_create_hw:
+ list_del(&flood_domain_port->flood_domain_port_node);
+ kfree(flood_domain_port);
+err_port_alloc:
+ return err;
+}
+
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port)
+{
+ struct prestera_flood_domain *flood_domain = port->flood_domain;
+
+ list_del(&port->flood_domain_port_node);
+
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain));
+
+ if (!list_empty(&flood_domain->flood_domain_port_list))
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain));
+
+ kfree(port);
+}
+
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ list_for_each_entry(flood_domain_port,
+ &flood_domain->flood_domain_port_list,
+ flood_domain_port_node)
+ if (flood_domain_port->dev == dev &&
+ vid == flood_domain_port->vid)
+ return flood_domain_port;
+
+ return NULL;
+}
+
static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
{
sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
@@ -892,6 +1360,8 @@ static int prestera_switch_init(struct prestera_switch *sw)
{
int err;
+ sw->np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+
err = prestera_hw_switch_init(sw);
if (err) {
dev_err(prestera_dev(sw), "Failed to init Switch device\n");
@@ -992,6 +1462,7 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
+ of_node_put(sw->np);
}
int prestera_device_register(struct prestera_device *dev)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 3c8116f16b4d..58f4e44d5ad7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -389,8 +389,8 @@ static int __prestera_inetaddr_event(struct prestera_switch *sw,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
- netif_is_lag_port(dev) || netif_is_ovs_port(dev))
+ if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
+ netif_is_lag_port(dev))
return 0;
return __prestera_inetaddr_port_event(dev, event, extack);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index b4599fe4ca8d..71cde97d85c8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -39,7 +39,10 @@ struct prestera_bridge {
struct net_device *dev;
struct prestera_switchdev *swdev;
struct list_head port_list;
+ struct list_head br_mdb_entry_list;
+ bool mrouter_exist;
bool vlan_enabled;
+ bool multicast_enabled;
u16 bridge_id;
};
@@ -48,8 +51,10 @@ struct prestera_bridge_port {
struct net_device *dev;
struct prestera_bridge *bridge;
struct list_head vlan_list;
+ struct list_head br_mdb_port_list;
refcount_t ref_count;
unsigned long flags;
+ bool mrouter;
u8 stp_state;
};
@@ -67,6 +72,20 @@ struct prestera_port_vlan {
u16 vid;
};
+struct prestera_br_mdb_port {
+ struct prestera_bridge_port *br_port;
+ struct list_head br_mdb_port_node;
+};
+
+/* Software representation of MDB table. */
+struct prestera_br_mdb_entry {
+ struct prestera_bridge *bridge;
+ struct prestera_mdb_entry *mdb;
+ struct list_head br_mdb_port_list;
+ struct list_head br_mdb_entry_node;
+ bool enabled;
+};
+
static struct workqueue_struct *swdev_wq;
static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
@@ -74,6 +93,82 @@ static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
u8 state);
+static struct prestera_bridge *
+prestera_bridge_find(const struct prestera_switch *sw,
+ const struct net_device *br_dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &sw->swdev->bridge_list, head)
+ if (bridge->dev == br_dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_find(const struct prestera_bridge *bridge,
+ const struct net_device *brport_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head)
+ if (br_port->dev == brport_dev)
+ return br_port;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_find(struct prestera_switch *sw,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_find(sw, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_find(bridge, brport_dev);
+}
+
+static void
+prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ prestera_port_uc_flood_set(port, false);
+ prestera_port_mc_flood_set(port, false);
+ prestera_port_learning_set(port, false);
+}
+
+static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ prestera_br_port_flags_reset(br_port, port);
+ return err;
+}
+
static struct prestera_bridge_vlan *
prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
{
@@ -220,6 +315,70 @@ static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
}
static void
+prestera_mdb_port_del(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_flood_domain *fl_domain = mdb->flood_domain;
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ flood_domain_port = prestera_flood_domain_port_find(fl_domain,
+ orig_dev,
+ mdb->vid);
+ if (flood_domain_port)
+ prestera_flood_domain_port_destroy(flood_domain_port);
+}
+
+static void
+prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb)
+{
+ struct prestera_bridge_port *br_port;
+
+ if (list_empty(&br_mdb->br_mdb_port_list)) {
+ list_for_each_entry(br_port, &br_mdb->bridge->port_list, head)
+ prestera_mdb_port_del(br_mdb->mdb, br_port->dev);
+
+ prestera_mdb_entry_destroy(br_mdb->mdb);
+ list_del(&br_mdb->br_mdb_entry_node);
+ kfree(br_mdb);
+ }
+}
+
+static void
+prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp;
+
+ list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ if (br_mdb_port->br_port == br_port) {
+ list_del(&br_mdb_port->br_mdb_port_node);
+ kfree(br_mdb_port);
+ }
+ }
+}
+
+static void
+prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp_port;
+ struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp;
+ struct prestera_bridge *br_dev = br_port->bridge;
+
+ list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ list_for_each_entry_safe(br_mdb_port, tmp_port,
+ &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ prestera_mdb_port_del(br_mdb->mdb,
+ br_mdb_port->br_port->dev);
+ prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port);
+ }
+ prestera_br_mdb_entry_put(br_mdb);
+ }
+}
+
+static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
@@ -244,6 +403,8 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
else
prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+ prestera_mdb_flush_bridge_port(br_port);
+
list_del(&port_vlan->br_vlan_head);
prestera_bridge_vlan_put(br_vlan);
prestera_bridge_port_put(br_port);
@@ -295,8 +456,10 @@ prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
bridge->vlan_enabled = vlan_enabled;
bridge->swdev = swdev;
bridge->dev = dev;
+ bridge->multicast_enabled = br_multicast_enabled(dev);
INIT_LIST_HEAD(&bridge->port_list);
+ INIT_LIST_HEAD(&bridge->br_mdb_entry_list);
list_add(&bridge->head, &swdev->bridge_list);
@@ -314,6 +477,7 @@ static void prestera_bridge_destroy(struct prestera_bridge *bridge)
else
prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+ WARN_ON(!list_empty(&bridge->br_mdb_entry_list));
WARN_ON(!list_empty(&bridge->port_list));
kfree(bridge);
}
@@ -405,6 +569,7 @@ prestera_bridge_port_create(struct prestera_bridge *bridge,
INIT_LIST_HEAD(&br_port->vlan_list);
list_add(&br_port->head, &bridge->port_list);
+ INIT_LIST_HEAD(&br_port->br_mdb_port_list);
return br_port;
}
@@ -414,6 +579,7 @@ prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
{
list_del(&br_port->head);
WARN_ON(!list_empty(&br_port->vlan_list));
+ WARN_ON(!list_empty(&br_port->br_mdb_port_list));
kfree(br_port);
}
@@ -461,19 +627,13 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
if (err)
return err;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- goto err_port_flood_set;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
- if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
return 0;
-err_port_learning_set:
-err_port_flood_set:
+err_flags2port_set:
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
return err;
@@ -592,8 +752,9 @@ void prestera_bridge_port_leave(struct net_device *br_dev,
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
- prestera_hw_port_learning_set(port, false);
- prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
+ prestera_mdb_flush_bridge_port(br_port);
+
+ prestera_br_port_flags_reset(br_port, port);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
prestera_bridge_port_put(br_port);
}
@@ -603,26 +764,14 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
- int err;
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags.mask, flags.val);
- if (err)
- return err;
-
- if (flags.mask & BR_LEARNING) {
- err = prestera_hw_port_learning_set(port,
- flags.val & BR_LEARNING);
- if (err)
- return err;
- }
-
- memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
-
- return 0;
+ br_port->flags &= ~flags.mask;
+ br_port->flags |= flags.val & flags.mask;
+ return prestera_br_port_flags_set(br_port, port);
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
@@ -716,6 +865,290 @@ err_port_stp_set:
return err;
}
+static int
+prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port,
+ bool enabled)
+{
+ struct prestera_port *pr_port;
+ struct prestera_switch *sw;
+ u16 lag_id;
+ int err;
+
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+ if (!pr_port)
+ return 0;
+
+ sw = pr_port->sw;
+ err = prestera_lag_id(sw, br_port->dev, &lag_id);
+ if (err)
+ return err;
+
+ list_for_each_entry(pr_port, &sw->port_list, list) {
+ if (pr_port->lag->lag_id == lag_id) {
+ err = prestera_port_mc_flood_set(pr_port, enabled);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_port *port;
+ bool enabled;
+ int err;
+
+ /* if mrouter exists:
+ * - make sure every mrouter receives unreg mcast traffic;
+ * if mrouter doesn't exist:
+ * - make sure every port receives unreg mcast traffic;
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ if (br_dev->multicast_enabled && br_dev->mrouter_exist)
+ enabled = br_port->mrouter;
+ else
+ enabled = br_port->flags & BR_MCAST_FLOOD;
+
+ if (netif_is_lag_master(br_port->dev)) {
+ err = prestera_br_port_lag_mdb_mc_enable_sync(br_port,
+ enabled);
+ if (err)
+ return err;
+ continue;
+ }
+
+ port = prestera_port_dev_lower_find(br_port->dev);
+ if (!port)
+ continue;
+
+ err = prestera_port_mc_flood_set(port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool
+prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_br_mdb_port *tmp_port;
+
+ list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (tmp_port->br_port->dev == orig_dev)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_mdb_port_add(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev,
+ const unsigned char addr[ETH_ALEN], u16 vid)
+{
+ struct prestera_flood_domain *flood_domain = mdb->flood_domain;
+ int err;
+
+ if (!prestera_flood_domain_port_find(flood_domain,
+ orig_dev, vid)) {
+ err = prestera_flood_domain_port_create(flood_domain, orig_dev,
+ vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Sync bridge mdb (software table) with HW table (if MC is enabled). */
+static int prestera_br_mdb_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+ struct prestera_bridge_port *br_port;
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_mdb_entry *mdb;
+ struct prestera_port *pr_port;
+ int err = 0;
+
+ if (!br_dev->multicast_enabled)
+ return 0;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ mdb = br_mdb->mdb;
+ /* Make sure every port that has explicitly been added to the mdb
+ * joins the specified group.
+ */
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ br_port = br_mdb_port->br_port;
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Match only mdb and br_mdb ports that belong to the
+ * same broadcast domain.
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ /* If the port is not in the MDB or there's no mrouter,
+ * clear the HW mdb.
+ */
+ if (prestera_br_mdb_port_is_member(br_mdb,
+ br_mdb_port->br_port->dev) &&
+ br_dev->mrouter_exist)
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ else
+ prestera_mdb_port_del(mdb, br_port->dev);
+
+ if (err)
+ return err;
+ }
+
+ /* Make sure that every mrouter port joins every MC group in its
+ * broadcast domain. If it's not an mrouter, it should leave.
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Make sure the mrouter wouldn't receive traffic from
+ * another broadcast domain (e.g. from a vlan which the
+ * mrouter port is not a member of).
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ if (br_port->mrouter) {
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ if (err)
+ return err;
+ } else if (!br_port->mrouter &&
+ !prestera_br_mdb_port_is_member
+ (br_mdb, br_port->dev)) {
+ prestera_mdb_port_del(mdb, br_port->dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable)
+{
+ int err;
+
+ if (enable != br_mdb->enabled) {
+ if (enable)
+ err = prestera_hw_mdb_create(br_mdb->mdb);
+ else
+ err = prestera_hw_mdb_destroy(br_mdb->mdb);
+
+ if (err)
+ return err;
+
+ br_mdb->enabled = enable;
+ }
+
+ return 0;
+}
+
+static int
+prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ int err;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ err = prestera_mdb_enable_set(br_mdb, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *br_dev;
+
+ br_dev = prestera_bridge_find(sw, orig_dev);
+ if (!br_dev)
+ return 0;
+
+ br_dev->multicast_enabled = !mc_disabled;
+
+ /* There's no point in re-enabling the MDB if the mrouter is missing. */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
+static bool
+prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &br_dev->port_list, head)
+ if (br_port->mrouter)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_port_attr_mrouter_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+
+ br_port = prestera_bridge_port_find(port->sw, orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+ br_port->mrouter = is_port_mrouter;
+
+ br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev);
+
+ /* Enable MDB processing if both an mrouter exists and MC is enabled.
+ * If MC is enabled but there is no mrouter, the device floods all
+ * multicast traffic (even if the MDB table is not empty) using the
+ * bridge's flood capabilities (without using the flood_domain).
+ */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -745,6 +1178,14 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = prestera_port_attr_mrouter_set(port, attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev,
+ attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -918,14 +1359,9 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
if (port_vlan->br_port)
return 0;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- return err;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
- if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
if (err)
@@ -950,8 +1386,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
err_bridge_vlan_get:
prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
err_port_vid_stp_set:
- prestera_hw_port_learning_set(port, false);
-err_port_learning_set:
+ prestera_br_port_flags_reset(br_port, port);
+err_flags2port_set:
return err;
}
@@ -1048,20 +1484,162 @@ static int prestera_port_vlans_add(struct prestera_port *port,
flag_pvid, extack);
}
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_create(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb_entry;
+ struct prestera_mdb_entry *mdb_entry;
+
+ br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL);
+ if (!br_mdb_entry)
+ return NULL;
+
+ mdb_entry = prestera_mdb_entry_create(sw, addr, vid);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ br_mdb_entry->mdb = mdb_entry;
+ br_mdb_entry->bridge = br_dev;
+ br_mdb_entry->enabled = true;
+ INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list);
+
+ list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list);
+
+ return br_mdb_entry;
+
+err_mdb_alloc:
+ kfree(br_mdb_entry);
+ return NULL;
+}
+
+static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (br_mdb_port->br_port == br_port)
+ return 0;
+
+ br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL);
+ if (!br_mdb_port)
+ return -ENOMEM;
+
+ br_mdb_port->br_port = br_port;
+ list_add(&br_mdb_port->br_mdb_port_node,
+ &br_mdb->br_mdb_port_list);
+
+ return 0;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_find(struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node)
+ if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) &&
+ vid == br_mdb->mdb->vid)
+ return br_mdb;
+
+ return NULL;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_get(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid);
+ if (br_mdb)
+ return br_mdb;
+
+ return prestera_br_mdb_entry_create(sw, br_dev, addr, vid);
+}
+
+static int
+prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ struct prestera_switch *sw;
+ struct prestera_port *port;
+ int err;
+
+ sw = prestera_switch_get(mdb->obj.orig_dev);
+ port = prestera_port_dev_lower_find(mdb->obj.orig_dev);
+
+ br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
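+ /* An MDB entry without a VID belongs to a vlan-unaware bridge;
+ * key it by the bridge index instead.
+ */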
+ if (mdb->vid)
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ br_dev->bridge_id);
+
+ if (!br_mdb)
+ return -ENOMEM;
+
+ /* Make sure newly allocated MDB entry gets disabled if either MC is
+ * disabled, or the mrouter does not exist.
+ */
+ WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ err = prestera_br_mdb_port_add(br_mdb, br_port);
+ if (err) {
+ prestera_br_mdb_entry_put(br_mdb);
+ return err;
+ }
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
return prestera_port_vlans_add(port, vlan, extack);
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_add(mdb);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ fallthrough;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_port_vlans_del(struct prestera_port *port,
@@ -1086,17 +1664,71 @@ static int prestera_port_vlans_del(struct prestera_port *port,
return 0;
}
+static int
+prestera_mdb_port_addr_obj_del(struct prestera_port *port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ int err;
+
+ /* Bridge port no longer exists - and neither does this MDB entry */
+ br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ /* Removing an MDB entry with a non-existing VLAN is not supported. */
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (br_port->bridge->vlan_enabled)
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ br_port->bridge->bridge_id);
+
+ if (!br_mdb)
+ return 0;
+
+ /* This port might have been the last member of the MDB group, so
+ * remove it from both the software and HW MDB, sync the MDB table,
+ * and then destroy the software MDB entry (if needed).
+ */
+ prestera_br_mdb_port_del(br_mdb, br_port);
+
+ prestera_br_mdb_entry_put(br_mdb);
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_del(port, mdb);
+ break;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_switchdev_blk_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a1e907c85217..bbea5458000b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1863,7 +1863,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (mss != 0) {
if (!(hw->flags & SKY2_HW_NEW_LE))
- mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss += skb_tcp_all_headers(skb);
if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
@@ -4711,7 +4711,7 @@ static irqreturn_t sky2_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int sky2_test_msi(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index da4ec235d146..97374fb3ee79 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,8 @@ config NET_MEDIATEK_SOC
select PINCTRL
select PHYLINK
select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 59c9a10f83ba..d9426b01f462 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
static const struct mtk_reg_map mtk_reg_map = {
.tx_irq_mask = 0x1a1c,
.tx_irq_status = 0x1a18,
@@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
static const char * const mtk_clks_source_name[] = {
@@ -990,7 +1001,7 @@ static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
- bool napi)
+ struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -1020,15 +1031,27 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
- else
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
+
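+ /* XDP_TX frames completed in NAPI context take the rx-napi
+ * return path; other frames are returned in bulk when a bulk
+ * queue is provided, or one at a time otherwise.
+ */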
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -1045,7 +1068,7 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -1221,7 +1244,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
@@ -1235,7 +1258,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
}
/* store skb to cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
@@ -1274,7 +1298,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
@@ -1432,11 +1456,320 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
}
}
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return !eth->hwlro;
+}
+
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
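+ /* XDP_TX recycles rx pages for transmission, so map them
+ * bidirectionally whenever an XDP program is attached.
+ */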
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+ id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = !xdp_frame_has_frags(xdpf),
+ };
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
+ }
+ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
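+ /* Map the frame head first, then walk any frags; QDMA claims a new
+ * descriptor per segment, while PDMA packs two segments per descriptor.
+ */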
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
+ /* store xdpf for cleanup */
+ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ txd_pdma = qdma_to_pdma(ring, htxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
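+ /* Dropped and aborted frames (including failed redirects and failed
+ * XDP_TX submissions) fall through here and go back to the page pool.
+ */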
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
@@ -1444,8 +1777,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
int done = 0, bytes = 0;
while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -1477,47 +1810,97 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
- if (ring->frag_size <= PAGE_SIZE)
- new_data = napi_alloc_frag(ring->frag_size);
- else
- new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dma_dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
- dma_unmap_single(eth->dma_dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- skb_free_frag(data);
- netdev->stats.rx_dropped++;
- goto skip_rx;
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
+ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rxdcsum = &trxd.rxd3;
+ else
+ rxdcsum = &trxd.rxd4;
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -1555,7 +1938,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1563,7 +1945,6 @@ release_desc:
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1582,6 +1963,9 @@ rx_done:
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (xdp_flush)
+ xdp_do_flush_map();
+
return done;
}
@@ -1590,15 +1974,16 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
@@ -1613,22 +1998,26 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[mac] += skb->len;
+ done[mac]++;
+ }
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
+ xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
@@ -1640,27 +2029,30 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[0] += skb->len;
- done[0]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[0] += skb->len;
+ done[0]++;
+ }
budget--;
}
-
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
@@ -1668,6 +2060,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
+ xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
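
Aside (not part of the patch): both TX completion paths now carry an xdp_frame_bulk so that mtk_tx_unmap() can batch page returns for XDP frames instead of freeing them one at a time. A hedged sketch of that convention, with a hypothetical unmap helper (the real mtk_tx_unmap() is not visible in these hunks):

	#include <net/xdp.h>

	/* Illustrative only: how a completion loop typically recycles XDP
	 * frames through a bulk queue.
	 */
	static void example_complete_xdp_frame(struct xdp_frame *xdpf,
					       struct xdp_frame_bulk *bq,
					       bool napi)
	{
		if (napi)
			xdp_return_frame_bulk(xdpf, bq);	/* queued, flushed later */
		else
			xdp_return_frame(xdpf);			/* slow path, immediate */
	}

	/* Caller pattern, mirroring the loops above:
	 *	xdp_frame_bulk_init(&bq);
	 *	while (...)
	 *		example_complete_xdp_frame(xdpf, &bq, true);
	 *	xdp_flush_frame_bulk(&bq);
	 */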
@@ -1877,7 +2270,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i], false);
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -1927,13 +2320,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- if (ring->frag_size <= PAGE_SIZE)
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- else
- ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
- if (!ring->data[i])
- return -ENOMEM;
+ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1944,16 +2339,33 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
for (i = 0; i < rx_dma_size; i++) {
struct mtk_rx_dma_v2 *rxd;
-
- dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ dma_addr_t dma_addr;
+ void *data;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr)))
+ return -ENOMEM;
+ }
rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1969,6 +2381,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd8 = 0;
}
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
@@ -2020,7 +2433,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
dma_unmap_single(eth->dma_dev, rxd->rxd1,
ring->buf_size, DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
@@ -2032,6 +2445,13 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
@@ -2639,6 +3059,48 @@ static int mtk_stop(struct net_device *dev)
return 0;
}
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
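
Aside (not part of the patch): with .ndo_bpf wired up below, a program can be attached from user space in the usual way. A minimal libbpf loader, assuming libbpf 1.0 semantics, would be:

	#include <bpf/libbpf.h>
	#include <linux/if_link.h>
	#include <net/if.h>
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;
		int ifindex, err;

		if (argc != 3) {
			fprintf(stderr, "usage: %s IFNAME PROG.o\n", argv[0]);
			return 1;
		}

		ifindex = if_nametoindex(argv[1]);
		if (!ifindex)
			return 1;

		obj = bpf_object__open_file(argv[2], NULL);
		if (!obj)
			return 1;
		if (bpf_object__load(obj))
			return 1;

		prog = bpf_object__next_program(obj, NULL);
		if (!prog)
			return 1;

		/* Native/driver mode exercises the new mtk ndo_bpf hook. */
		err = bpf_xdp_attach(ifindex, bpf_program__fd(prog),
				     XDP_FLAGS_DRV_MODE, NULL);
		return err ? 1 : 0;
	}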
+
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -2934,6 +3396,12 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -3123,11 +3591,18 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
int i;
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
break;
}
}
@@ -3135,13 +3610,35 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_ethtool_stats);
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
default:
return -EOPNOTSUPP;
}
}
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -3169,6 +3666,8 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
@@ -3261,6 +3760,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -3761,6 +4262,7 @@ static const struct mtk_soc_data mt7986_data = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0a632896451a..7405c97cda66 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,6 +18,8 @@
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
@@ -49,6 +51,11 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
#define MTK_QRX_OFFSET 0x10
#define MTK_MAX_RX_RING_NUM 4
@@ -563,6 +570,16 @@ struct mtk_tx_dma_v2 {
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -586,6 +603,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
@@ -677,6 +696,12 @@ enum mtk_dev_state {
MTK_RESETTING
};
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptor s
* @skb: The SKB pointer of the packet being sent
@@ -686,7 +711,9 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
+ enum mtk_tx_buf_type type;
+ void *data;
+
u32 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
@@ -745,6 +772,9 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
@@ -1078,6 +1108,8 @@ struct mtk_eth {
struct mtk_ppe *ppe;
struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 5d457bc9acc1..25dc3c3aa31d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -88,32 +88,28 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
- struct net_device_path_ctx ctx = {
- .dev = dev,
- };
- struct net_device_path path = {};
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
- if (!ctx.dev)
+ if (!dev)
return -ENODEV;
- memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
-
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
- if (!dev->netdev_ops->ndo_fill_forward_path)
- return -1;
-
- if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
- return -1;
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
- if (path.type != DEV_PATH_MTK_WDMA)
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
return -1;
- info->wdma_idx = path.mtk_wdma.wdma_idx;
- info->queue = path.mtk_wdma.queue;
- info->bss = path.mtk_wdma.bss;
- info->wcid = path.mtk_wdma.wcid;
+ info->wdma_idx = path->mtk_wdma.wdma_idx;
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
return 0;
}
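
Aside (not part of the patch): dev_fill_forward_path() resolves the whole forwarding path for a destination and returns it as a stack; the rewrite above only inspects the last, closest-to-hardware hop. A small illustration of the API (the printing is just a sketch, not driver code):

	#include <linux/netdevice.h>

	static void example_dump_forward_path(struct net_device *dev,
					      const u8 *daddr)
	{
		struct net_device_path_stack stack;
		int i;

		if (dev_fill_forward_path(dev, daddr, &stack))
			return;

		for (i = 0; i < stack.num_paths; i++)
			pr_info("hop %d: path type %d\n", i, stack.path[i].type);
	}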
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 95839fd84dab..3f0e5e64de50 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
@@ -32,6 +33,7 @@
#define MTK_STAR_SKB_ALIGNMENT 16
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
* work for this controller.
@@ -129,6 +131,11 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_INT_MASK 0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+/* Delay-Macro Register */
+#define MTK_STAR_REG_TEST0 0x0058
+#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
+#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
+
/* Misc. Config Register */
#define MTK_STAR_REG_TEST1 0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
@@ -149,6 +156,7 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+#define MTK_STAR_BIT_CLK_DIV_50 0x32
/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT 0x0100
@@ -181,9 +189,14 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_C_RX_TWIST 0x0218
/* Ethernet CFG Control */
-#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
-#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
-#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
+#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
+#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
+#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
/* Represents the actual structure of descriptors used by the MAC. We can
* reuse the same structure for both TX and RX - the layout is the same, only
@@ -216,7 +229,8 @@ struct mtk_star_ring_desc_data {
struct sk_buff *skb;
};
-#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_RING_NUM_DESCS 512
+#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
@@ -231,6 +245,11 @@ struct mtk_star_ring {
unsigned int tail;
};
+struct mtk_star_compat {
+ int (*set_interface_mode)(struct net_device *ndev);
+ unsigned char bit_clk_div;
+};
+
struct mtk_star_priv {
struct net_device *ndev;
@@ -246,7 +265,8 @@ struct mtk_star_priv {
struct mtk_star_ring rx_ring;
struct mii_bus *mii;
- struct napi_struct napi;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
struct device_node *phy_node;
phy_interface_t phy_intf;
@@ -255,6 +275,11 @@ struct mtk_star_priv {
int speed;
int duplex;
int pause;
+ bool rmii_rxc;
+ bool rx_inv;
+ bool tx_inv;
+
+ const struct mtk_star_compat *compat_data;
/* Protects against concurrent descriptor access. */
spinlock_t lock;
@@ -357,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
mtk_star_ring_push_head(ring, desc_data, flags);
}
-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
- return abs(ring->head - ring->tail);
-}
+ u32 avail;
-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
-}
+ if (ring->tail > ring->head)
+ avail = ring->tail - ring->head - 1;
+ else
+ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) > 0;
+ return avail;
}
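
Aside (not part of the patch): the new helper computes free slots in the circular TX ring while always leaving one descriptor unused so that head == tail unambiguously means "empty". A small self-contained check of the arithmetic, in plain C, assuming the 512-entry ring size defined above:

	#include <assert.h>

	#define NUM_DESCS 512	/* MTK_STAR_RING_NUM_DESCS */

	static unsigned int ring_avail(unsigned int head, unsigned int tail)
	{
		if (tail > head)
			return tail - head - 1;
		return NUM_DESCS - head + tail - 1;
	}

	int main(void)
	{
		assert(ring_avail(0, 0) == NUM_DESCS - 1);	/* empty ring */
		assert(ring_avail(510, 5) == 6);		/* producer wrapped past the end */
		assert(ring_avail(4, 5) == 0);			/* full: one slot kept free */
		return 0;
	}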
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
@@ -414,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value |= MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
@@ -429,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
-{
- unsigned int val;
-
- regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
-
- return val;
-}
-
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
unsigned int val;
- val = mtk_star_intr_read(priv);
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
return val;
@@ -714,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
-/* All processing for TX and RX happens in the napi poll callback.
- *
- * FIXME: The interrupt handling should be more fine-grained with each
- * interrupt enabled/disabled independently when needed. Unfortunatly this
- * turned out to impact the driver's stability and until we have something
- * working properly, we're disabling all interrupts during TX & RX processing
- * or when resetting the counter registers.
- */
+/**
+ * mtk_star_handle_irq - Interrupt Handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ * Description : this is the driver interrupt service routine.
+ * it mainly handles:
+ * 1. tx complete interrupt for frame transmission.
+ * 2. rx complete interrupt for frame reception.
+ * 3. MAC Management Counter interrupt to avoid counter overflow.
+ **/
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
- struct mtk_star_priv *priv;
- struct net_device *ndev;
-
- ndev = data;
- priv = netdev_priv(ndev);
+ struct net_device *ndev = data;
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int intr_status = mtk_star_intr_ack_all(priv);
+ bool rx, tx;
+
+ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+ napi_schedule_prep(&priv->rx_napi);
+ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+ napi_schedule_prep(&priv->tx_napi);
+
+ if (rx || tx) {
+ spin_lock(&priv->lock);
+ /* mask Rx and TX Complete interrupt */
+ mtk_star_disable_dma_irq(priv, rx, tx);
+ spin_unlock(&priv->lock);
+
+ if (rx)
+ __napi_schedule(&priv->rx_napi);
+ if (tx)
+ __napi_schedule(&priv->tx_napi);
+ }
- if (netif_running(ndev)) {
- mtk_star_intr_disable(priv);
- napi_schedule(&priv->napi);
+ /* interrupt is triggered once any counters reach 0x8000000 */
+ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
}
return IRQ_HANDLED;
@@ -821,32 +883,26 @@ static void mtk_star_phy_config(struct mtk_star_priv *priv)
val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
- /* Only full-duplex supported for now. */
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
-
- regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
-
if (priv->pause) {
- val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
- val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
- val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
} else {
- val = 0;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
}
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
- if (priv->pause) {
- val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
- val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
- } else {
- val = 0;
- }
-
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
@@ -898,14 +954,7 @@ static void mtk_star_init_config(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
MTK_STAR_MSK_MAC_CLK_CONF,
- MTK_STAR_BIT_CLK_DIV_10);
-}
-
-static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
-{
- regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
- MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
- MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+ priv->compat_data->bit_clk_div);
}
static int mtk_star_enable(struct net_device *ndev)
@@ -951,11 +1000,12 @@ static int mtk_star_enable(struct net_device *ndev)
/* Request the interrupt */
ret = request_irq(ndev->irq, mtk_star_handle_irq,
- IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ IRQF_TRIGGER_NONE, ndev->name, ndev);
if (ret)
goto err_free_skbs;
- napi_enable(&priv->napi);
+ napi_enable(&priv->tx_napi);
+ napi_enable(&priv->rx_napi);
mtk_star_intr_ack_all(priv);
mtk_star_intr_enable(priv);
@@ -988,7 +1038,8 @@ static void mtk_star_disable(struct net_device *ndev)
struct mtk_star_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ napi_disable(&priv->tx_napi);
+ napi_disable(&priv->rx_napi);
mtk_star_intr_disable(priv);
mtk_star_dma_disable(priv);
mtk_star_intr_ack_all(priv);
@@ -1020,13 +1071,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
return phy_mii_ioctl(ndev->phydev, req, cmd);
}
-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ netif_stop_queue(priv->ndev);
+
+ /* Might race with mtk_star_tx_poll, check again */
+ smp_mb();
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+ return -EBUSY;
+
+ netif_start_queue(priv->ndev);
+
+ return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+ return 0;
+
+ return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->tx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ /* This is a hard error, log it. */
+ pr_err_ratelimited("Tx ring full when queue awake\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1034,17 +1117,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
desc_data.skb = skb;
desc_data.len = skb->len;
-
- spin_lock_bh(&priv->lock);
-
mtk_star_ring_push_head_tx(ring, &desc_data);
netdev_sent_queue(ndev, skb->len);
- if (mtk_star_ring_full(ring))
- netif_stop_queue(ndev);
-
- spin_unlock_bh(&priv->lock);
+ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
mtk_star_dma_resume_tx(priv);
@@ -1076,31 +1153,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
return ret;
}
-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
+ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+ tx_napi);
+ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
struct mtk_star_ring *ring = &priv->tx_ring;
struct net_device *ndev = priv->ndev;
- int ret, pkts_compl, bytes_compl;
- bool wake = false;
-
- spin_lock(&priv->lock);
-
- for (pkts_compl = 0, bytes_compl = 0;;
- pkts_compl++, bytes_compl += ret, wake = true) {
- if (!mtk_star_ring_descs_available(ring))
- break;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
if (ret < 0)
break;
+
+ count++;
+ pkts_compl++;
+ bytes_compl += ret;
+ entry = ring->tail;
}
netdev_completed_queue(ndev, pkts_compl, bytes_compl);
- if (wake && netif_queue_stopped(ndev))
+ if (unlikely(netif_queue_stopped(ndev)) &&
+ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
netif_wake_queue(ndev);
- spin_unlock(&priv->lock);
+ if (napi_complete(napi)) {
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, false, true);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
}
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
@@ -1180,7 +1266,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
@@ -1188,107 +1274,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
struct net_device *ndev = priv->ndev;
struct sk_buff *curr_skb, *new_skb;
dma_addr_t new_dma_addr;
- int ret;
+ int ret, count = 0;
- spin_lock(&priv->lock);
- ret = mtk_star_ring_pop_tail(ring, &desc_data);
- spin_unlock(&priv->lock);
- if (ret)
- return -1;
+ while (count < budget) {
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return -1;
- curr_skb = desc_data.skb;
+ curr_skb = desc_data.skb;
- if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
- (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
- /* Error packet -> drop and reuse skb. */
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- /* Prepare new skb before receiving the current one. Reuse the current
- * skb if we fail at any point.
- */
- new_skb = mtk_star_alloc_skb(ndev);
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ /* Prepare new skb before receiving the current one.
+ * Reuse the current skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
- if (dma_mapping_error(dev, new_dma_addr)) {
- ndev->stats.rx_dropped++;
- dev_kfree_skb(new_skb);
- new_skb = curr_skb;
- netdev_err(ndev, "DMA mapping error of RX descriptor\n");
- goto push_new_skb;
- }
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
- /* We can't fail anymore at this point: it's safe to unmap the skb. */
- mtk_star_dma_unmap_rx(priv, &desc_data);
+ /* We can't fail anymore at this point:
+ * it's safe to unmap the skb.
+ */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
- skb_put(desc_data.skb, desc_data.len);
- desc_data.skb->ip_summed = CHECKSUM_NONE;
- desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
- desc_data.skb->dev = ndev;
- netif_receive_skb(desc_data.skb);
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
- /* update dma_addr for new skb */
- desc_data.dma_addr = new_dma_addr;
+ /* update dma_addr for new skb */
+ desc_data.dma_addr = new_dma_addr;
push_new_skb:
- desc_data.len = skb_tailroom(new_skb);
- desc_data.skb = new_skb;
- spin_lock(&priv->lock);
- mtk_star_ring_push_head_rx(ring, &desc_data);
- spin_unlock(&priv->lock);
+ count++;
- return 0;
-}
-
-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
-{
- int received, ret;
-
- for (received = 0, ret = 0; received < budget && ret == 0; received++)
- ret = mtk_star_receive_packet(priv);
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ }
mtk_star_dma_resume_rx(priv);
- return received;
+ return count;
}
-static int mtk_star_poll(struct napi_struct *napi, int budget)
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
- unsigned int status;
- int received = 0;
-
- priv = container_of(napi, struct mtk_star_priv, napi);
-
- status = mtk_star_intr_read(priv);
- mtk_star_intr_ack_all(priv);
+ int work_done = 0;
- if (status & MTK_STAR_BIT_INT_STS_TNTC)
- /* Clean-up all TX descriptors. */
- mtk_star_tx_complete_all(priv);
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
- if (status & MTK_STAR_BIT_INT_STS_FNRC)
- /* Receive up to $budget packets. */
- received = mtk_star_process_rx(priv, budget);
-
- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
- mtk_star_update_stats(priv);
- mtk_star_reset_counters(priv);
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock(&priv->lock);
}
- if (received < budget)
- napi_complete_done(napi, received);
-
- mtk_star_intr_enable(priv);
-
- return received;
+ return work_done;
}
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
@@ -1442,6 +1506,25 @@ static void mtk_star_clk_disable_unprepare(void *data)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+static int mtk_star_set_timing(struct mtk_star_priv *priv)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int delay_val = 0;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
+}
+
static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
@@ -1460,6 +1543,7 @@ static int mtk_star_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
+ priv->compat_data = of_device_get_match_data(&pdev->dev);
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, ndev);
@@ -1510,7 +1594,8 @@ static int mtk_star_probe(struct platform_device *pdev)
ret = of_get_phy_mode(of_node, &priv->phy_intf);
if (ret) {
return ret;
- } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
+ priv->phy_intf != PHY_INTERFACE_MODE_MII) {
dev_err(dev, "unsupported phy mode: %s\n",
phy_modes(priv->phy_intf));
return -EINVAL;
@@ -1522,7 +1607,23 @@ static int mtk_star_probe(struct platform_device *pdev)
return -ENODEV;
}
- mtk_star_set_mode_rmii(priv);
+ priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
+ priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
+ priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
+
+ if (priv->compat_data->set_interface_mode) {
+ ret = priv->compat_data->set_interface_mode(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = mtk_star_set_timing(priv);
+ if (ret) {
+ dev_err(dev, "Failed to set timing, err = %d\n", ret);
+ return -EINVAL;
+ }
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1550,16 +1651,92 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
+ NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
}
#ifdef CONFIG_OF
+static int mt8516_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val, ret, rmii_rxc;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ rmii_rxc = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG1_CON,
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
+ rmii_rxc);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG0_CON,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
+ intf_val);
+}
+
+static int mt8365_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
+ break;
+ default:
+ dev_err(dev, "This interface not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG_CON_V2,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
+ intf_val);
+}
+
+static const struct mtk_star_compat mtk_star_mt8516_compat = {
+ .set_interface_mode = mt8516_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
+};
+
+static const struct mtk_star_compat mtk_star_mt8365_compat = {
+ .set_interface_mode = mt8365_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
+};
+
static const struct of_device_id mtk_star_of_match[] = {
- { .compatible = "mediatek,mt8516-eth", },
- { .compatible = "mediatek,mt8518-eth", },
- { .compatible = "mediatek,mt8175-eth", },
+ { .compatible = "mediatek,mt8516-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8518-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8175-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8365-eth",
+ .data = &mtk_star_mt8365_compat },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ out:
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index ac5468b77488..82a07a31cde7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
- devlink_region_create(devlink,
- &region_cr_space_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- pci_resource_len(pdev, 0));
+ devl_region_create(devlink,
+ &region_cr_space_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
- devlink_region_create(devlink,
- &region_fw_health_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- HEALTH_BUFFER_SIZE);
+ devl_region_create(devlink,
+ &region_fw_health_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
- devlink_region_destroy(crdump->region_fw_health);
- devlink_region_destroy(crdump->region_crspace);
+ devl_region_destroy(crdump->region_fw_health);
+ devl_region_destroy(crdump->region_crspace);
}
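
Aside (not part of the patch): the devlink_* to devl_* renames switch these paths to the variants that expect the caller to already hold the devlink instance lock, which the surrounding mlx4 changes now take explicitly. A minimal sketch of that convention; the region ops and sizes here are placeholders, not mlx4's:

	#include <net/devlink.h>
	#include <linux/err.h>

	static int example_create_region(struct devlink *devlink,
					 const struct devlink_region_ops *ops)
	{
		struct devlink_region *region;

		devl_lock(devlink);	/* devl_* helpers assert this lock is held */
		region = devl_region_create(devlink, ops, 3, 0x100000);
		devl_unlock(devlink);

		return PTR_ERR_OR_ZERO(region);
	}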
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index af3b2b59a2a6..43a4102e9c09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -645,7 +645,7 @@ static int get_real_size(const struct sk_buff *skb,
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
@@ -653,7 +653,7 @@ static int get_real_size(const struct sk_buff *skb,
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
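
Aside (not part of the patch): skb_tcp_all_headers() and skb_inner_tcp_all_headers() wrap the open-coded expressions removed here, so the computed LSO header size is unchanged. Their effect is equivalent to:

	#include <linux/skbuff.h>
	#include <linux/tcp.h>

	/* Equivalent to the helpers used above (see include/linux/tcp.h). */
	static inline int example_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}

	static inline int example_inner_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
	}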
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b187c210d4d6..78c5f40382c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devlink_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register(devlink, &info->devlink_port, port);
if (err)
return err;
@@ -3071,7 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3093,7 +3093,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3109,7 +3109,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3333,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
+ struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
@@ -3341,6 +3342,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3999,6 +4001,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4026,6 +4029,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -4035,6 +4039,7 @@ err_params_unregister:
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4056,8 +4061,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
+ struct devlink *devlink;
int p, i;
+ devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4137,6 +4145,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4172,6 +4181,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4292,15 +4302,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4333,6 +4348,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4340,6 +4356,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4376,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4385,12 +4407,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4402,6 +4428,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4409,6 +4436,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4451,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea867a45764..a3773a8177ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o lib/tout.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o
#
# Netdev basic
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
+ en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+ lib/crypto.o
#
# Netdev extra
@@ -45,7 +46,8 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o en/tc/int_port.o
+ en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+ en/tc/post_meter.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -53,7 +55,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
- en/tc/act/redirect_ingress.o
+ en/tc/act/redirect_ingress.o en/tc/act/police.o
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
@@ -67,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/debugfs.o esw/devlink_port.o esw/vporttbl.o esw/qos.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 9caa1b52321b..3e232a65a0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -166,6 +166,28 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
+static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cmd *cmd;
+ char tbuf[6];
+ int weight;
+ int field;
+ int ret;
+
+ cmd = filp->private_data;
+ weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+ field = cmd->max_reg_cmds - weight;
+ ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations slots_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = slots_read,
+};
+
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
@@ -176,6 +198,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
cmd = &dev->priv.dbg.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
+ debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
+
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 50422b56a64d..0571e40c6ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -340,8 +340,10 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
struct auxiliary_driver *adrv;
int ret = 0, i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -389,6 +391,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
return ret;
}
@@ -401,7 +404,9 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
pm_message_t pm = {};
int i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -430,6 +435,7 @@ skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -438,6 +444,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
{
int ret;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
@@ -450,6 +457,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
@@ -526,16 +534,22 @@ del_adev:
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- return 0;
+ goto out;
+
+ err = add_drivers(dev);
- return add_drivers(dev);
+out:
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
+ return err;
}
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index f85166e587f2..66c6a7017695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -104,7 +104,16 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- return mlx5_fw_reset_wait_reset_done(dev);
+ err = mlx5_fw_reset_wait_reset_done(dev);
+ if (err)
+ return err;
+
+ mlx5_unload_one_devl_locked(dev);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
+ return err;
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
@@ -134,6 +143,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
+ int ret = 0;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
@@ -156,17 +166,21 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev);
- return 0;
+ mlx5_unload_one_devl_locked(dev);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
- return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
- return mlx5_devlink_reload_fw_activate(devlink, extack);
+ ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ else
+ ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
+
+ return ret;
}
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -174,24 +188,27 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int ret = 0;
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, the driver is also reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- return 0;
+ return ret;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -828,28 +845,28 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
- &core_dev->priv);
+ err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
}
int mlx5_devlink_register(struct devlink *devlink)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b6c15efe92ad..a560df446bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -109,7 +109,7 @@ struct page_pool;
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
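For the MLX5E_MAX_RQ_NUM_MTTS change above: the old bound (1 << 16) * 2 = 131072 MTTs translates to an octword count of 65536 if MLX5_MTT_OCTW() is on the order of num_mtts / 2, one past what a u16 can hold, while ALIGN_DOWN(U16_MAX, 4) * 2 = 131064 keeps it at 65532. A quick standalone check of that arithmetic (the /2 octword relation and the macros here are local assumptions for illustration):

        #include <stdio.h>
        #include <stdint.h>

        #define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))
        #define MTT_OCTW(num)      ((num) / 2)   /* assumed relation, illustration only */

        int main(void)
        {
                unsigned int old_max = (1u << 16) * 2;                  /* 131072 */
                unsigned int new_max = ALIGN_DOWN(UINT16_MAX, 4) * 2;   /* 131064 */

                printf("old: %u mtts -> %u octwords (u16 max %u)\n",
                       old_max, MTT_OCTW(old_max), UINT16_MAX);
                printf("new: %u mtts -> %u octwords\n", new_max, MTT_OCTW(new_max));
                return 0;
        }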
@@ -174,8 +174,8 @@ struct page_pool;
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
- << MLX5_MKEY_BSF_OCTO_SIZE)
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
+ mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}
-static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
* Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
* than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
* cache-aligned.
*/
-#if L1_CACHE_BYTES < 128
- return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
-#else
- return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+ u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+
+#if L1_CACHE_BYTES >= 128
+ wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
+ return wqebbs;
}
struct mlx5e_tx_wqe {
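For the mlx5e_get_sw_max_sq_mpw_wqebbs() rewrite above: the value is first capped at MLX5_SEND_WQE_MAX_WQEBBS - 1 = 15 and, on CPUs with 128-byte cache lines, rounded down to an even number so a full MPWQE session stays cache-aligned. A small standalone re-derivation (constant taken from the comment above, ALIGN_DOWN defined locally):

        #include <stdio.h>
        #include <stdint.h>

        #define SEND_WQE_MAX_WQEBBS 16            /* MLX5_SEND_WQE_MAX_WQEBBS per the comment */
        #define ALIGN_DOWN(x, a)    ((x) & ~((a) - 1))

        static uint8_t sw_max_sq_mpw_wqebbs(uint8_t max_sq_wqebbs, int l1_cache_bytes)
        {
                uint8_t wqebbs = max_sq_wqebbs < SEND_WQE_MAX_WQEBBS - 1 ?
                                 max_sq_wqebbs : SEND_WQE_MAX_WQEBBS - 1;

                if (l1_cache_bytes >= 128)
                        wqebbs = ALIGN_DOWN(wqebbs, 2);
                return wqebbs;
        }

        int main(void)
        {
                /* 16 BBs of hardware room: 15 on 64B cache lines, 14 on 128B ones. */
                printf("64B:  %u\n", sw_max_sq_mpw_wqebbs(16, 64));
                printf("128B: %u\n", sw_max_sq_mpw_wqebbs(16, 128));
                return 0;
        }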
@@ -321,7 +322,8 @@ struct mlx5e_params {
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
struct {
- struct mlx5e_mqprio_rl *rl;
+ u64 max_rate[TC_MAX_QUEUE];
+ u32 hw_id[TC_MAX_QUEUE];
} channel;
} mqprio;
bool rx_cqe_compress_def;
@@ -455,7 +457,7 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
- u16 max_sq_mpw_wqebbs;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
@@ -570,7 +572,7 @@ struct mlx5e_xdpsq {
struct device *pdev;
__be32 mkey_be;
u16 stop_room;
- u16 max_sq_mpw_wqebbs;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -898,16 +900,8 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
-struct mlx5e_htb {
- DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
- DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
- struct mlx5e_sq_stats **qos_sq_stats;
- u16 max_qos_sqs;
- u16 maj_id;
- u16 defcls;
-};
-
struct mlx5e_trap;
+struct mlx5e_htb;
struct mlx5e_priv {
/* priv data path fields - start */
@@ -928,7 +922,7 @@ struct mlx5e_priv {
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
- struct mlx5e_flow_steering fs;
+ struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -945,6 +939,8 @@ struct mlx5e_priv {
struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ struct mlx5e_sq_stats **htb_qos_sq_stats;
+ u16 htb_max_qos_sqs;
u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
@@ -976,7 +972,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
- struct mlx5e_htb htb;
+ struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
};
@@ -992,6 +988,8 @@ enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
+ MLX5E_PROFILE_FEATURE_FS_VLAN,
+ MLX5E_PROFILE_FEATURE_FS_TC,
};
struct mlx5e_profile {
@@ -1027,7 +1025,6 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
@@ -1181,7 +1178,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index ae52e7f38306..b69f9d10ccbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -21,6 +21,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
struct netdev_phys_item_id ppid = {};
struct devlink_port *dl_port;
unsigned int dl_port_index;
+ int ret;
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
@@ -41,7 +42,13 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
memset(dl_port, 0, sizeof(*dl_port));
devlink_port_attrs_set(dl_port, &attrs);
- return devlink_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ ret = devl_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
+
+ return ret;
}
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
@@ -54,8 +61,13 @@ void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
- devlink_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ devl_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
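Port registration and unregistration above take the devlink instance lock only when the caller is not already inside a locked flow; the MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW flag set in mlx5_attach_device()/mlx5_detach_device() is what signals that. A rough pthread-based sketch of that conditional-locking shape (names are illustrative, not the devlink API):

        #include <pthread.h>
        #include <stdio.h>

        struct ctx {
                pthread_mutex_t lock;   /* stands in for the devlink instance lock */
                int locked_flow;        /* stands in for MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW */
        };

        /* Leaf operation: take the lock only if no outer flow already holds it. */
        static void register_port(struct ctx *c)
        {
                if (!c->locked_flow)
                        pthread_mutex_lock(&c->lock);
                puts("port registered under the lock");
                if (!c->locked_flow)
                        pthread_mutex_unlock(&c->lock);
        }

        /* Outer flow: holds the lock for the whole sequence and marks that fact. */
        static void attach_flow(struct ctx *c)
        {
                pthread_mutex_lock(&c->lock);
                c->locked_flow = 1;
                register_port(c);       /* must not try to lock again */
                c->locked_flow = 0;
                pthread_mutex_unlock(&c->lock);
        }

        int main(void)
        {
                struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .locked_flow = 0 };

                register_port(&c);      /* standalone call: locks by itself */
                attach_flow(&c);        /* nested call: reuses the outer lock */
                return 0;
        }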
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6e3a90a959e9..9b8cdf2e68ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -15,29 +15,6 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
-struct mlx5e_tc_table {
- /* Protects the dynamic assignment of the t parameter
- * which is the nic tc root table.
- */
- struct mutex t_lock;
- struct mlx5_flow_table *t;
- struct mlx5_flow_table *miss_t;
- struct mlx5_fs_chains *chains;
- struct mlx5e_post_act *post_act;
-
- struct rhashtable ht;
-
- struct mod_hdr_tbl mod_hdr;
- struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
-
- struct mlx5_tc_ct_priv *ct;
- struct mapping_ctx *mapping;
-};
-
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
@@ -160,16 +137,20 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
- struct mlx5e_tc_table tc;
+ struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
@@ -200,13 +181,22 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
-
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 7aa25a5e29d7..e153d6119e02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -208,7 +208,7 @@ out:
static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs.udp->tables[i].t;
+ dest.ft = priv->fs->udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
int i;
if (!fs_udp)
@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs.udp = NULL;
+ priv->fs->udp = NULL;
}
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
{
int i, err;
- if (priv->fs.udp) {
- priv->fs.udp->ref_cnt++;
+ if (priv->fs->udp) {
+ priv->fs->udp->ref_cnt++;
return 0;
}
- priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
- if (!priv->fs.udp)
+ priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
+ if (!priv->fs->udp)
return -ENOMEM;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_tables;
- priv->fs.udp->ref_cnt = 1;
+ priv->fs->udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs.udp, i);
+ fs_udp_destroy_table(priv->fs->udp, i);
- kfree(priv->fs.udp);
- priv->fs.udp = NULL;
+ kfree(priv->fs->udp);
+ priv->fs->udp = NULL;
return err;
}
@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -474,7 +474,7 @@ err:
static int fs_any_create_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5e_flow_table *ft = &priv->fs->any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv)
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.any->table.t;
+ dest.ft = priv->fs->any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_any *fs_any = priv->fs.any;
+ struct mlx5e_fs_any *fs_any = priv->fs->any;
if (!fs_any)
return;
@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs.any = NULL;
+ priv->fs->any = NULL;
}
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
{
int err;
- if (priv->fs.any) {
- priv->fs.any->ref_cnt++;
+ if (priv->fs->any) {
+ priv->fs->any->ref_cnt++;
return 0;
}
- priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
- if (!priv->fs.any)
+ priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
+ if (!priv->fs->any)
return -ENOMEM;
err = fs_any_create_table(priv);
@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_table;
- priv->fs.any->ref_cnt = 1;
+ priv->fs->any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs.any);
+ fs_any_destroy_table(priv->fs->any);
- kfree(priv->fs.any);
- priv->fs.any = NULL;
+ kfree(priv->fs->any);
+ priv->fs->any = NULL;
return err;
}
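The fs_tt_redirect tables touched above are shared objects: the first creator allocates priv->fs->udp / priv->fs->any and later users only bump ref_cnt, with the last destroy freeing the tables (the decrement itself sits outside the shown hunks). A minimal sketch of that get/put pattern under those assumptions, with plain malloc stand-ins rather than the mlx5 flow-table API:

        #include <stdlib.h>
        #include <stdio.h>

        struct shared_tbl {
                unsigned int ref_cnt;
                /* the real object also holds the flow tables and default rules */
        };

        static struct shared_tbl *tbl;  /* plays the role of priv->fs->udp */

        static int tbl_get(void)
        {
                if (tbl) {
                        tbl->ref_cnt++;
                        return 0;
                }
                tbl = calloc(1, sizeof(*tbl));
                if (!tbl)
                        return -1;
                tbl->ref_cnt = 1;       /* first user owns the initial reference */
                return 0;
        }

        static void tbl_put(void)
        {
                if (!tbl)
                        return;
                if (--tbl->ref_cnt)
                        return;         /* still in use by another feature */
                free(tbl);
                tbl = NULL;
        }

        int main(void)
        {
                tbl_get();              /* e.g. PTP steering */
                tbl_get();              /* e.g. another redirect user */
                tbl_put();
                printf("after one put: ref_cnt = %u\n", tbl->ref_cnt);
                tbl_put();              /* last put frees the table */
                printf("after last put: %s\n", tbl ? "still allocated" : "freed");
                return 0;
        }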
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
new file mode 100644
index 000000000000..6dac76fa58a3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/pkt_cls.h>
+#include "htb.h"
+#include "en.h"
+#include "../qos.h"
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ struct mlx5e_selq *selq;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+/* Software representation of the QoS tree */
+
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = callback(data, node->qid, node->hw_id);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb)
+{
+ int last;
+
+ last = find_last_bit(htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(htb->mdev));
+ return last == mlx5e_qos_max_leaf_nodes(htb->mdev) ? 0 : last + 1;
+}
+
+static int mlx5e_htb_find_unused_qos_qid(struct mlx5e_htb *htb)
+{
+ int size = mlx5e_qos_max_leaf_nodes(htb->mdev);
+ struct mlx5e_priv *priv = htb->priv;
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(htb->qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+static struct mlx5e_qos_node *
+mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, htb->qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(htb->priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_create_root(struct mlx5e_htb *htb)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_rcu(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, htb->qos_used_qids);
+ mlx5e_update_tx_netdev_queues(htb->priv);
+ }
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
+}
+
+/* TX datapath API */
+
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_htb_node_find_rcu(htb, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+/* HTB TC handlers */
+
+static int
+mlx5e_htb_root_add(struct mlx5e_htb *htb, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ mlx5e_selq_prepare_htb(htb->selq, htb_maj_id, htb_defcls);
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ goto err_cancel_selq;
+ }
+
+ root = mlx5e_htb_node_create_root(htb);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(htb->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ mlx5e_selq_apply(htb->selq);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_htb_node_delete(htb, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(htb->selq);
+ return err;
+}
+
+static int mlx5e_htb_root_del(struct mlx5e_htb *htb)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_DESTROY\n");
+
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare_htb(htb->selq, 0, 0);
+ mlx5e_selq_apply(htb->selq);
+
+ root = mlx5e_htb_node_find(htb, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(htb->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(htb->mdev, root->hw_id);
+ if (err)
+ qos_err(htb->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_htb_node_delete(htb, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_htb *htb, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)
+{
+ /* Hardware treats 0 as "unlimited", set at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
+
+ qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ int qid;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_htb_find_unused_qos_qid(htb);
+ if (qid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
+ return qid;
+ }
+
+ parent = mlx5e_htb_node_find(htb, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_htb_node_delete(htb, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ struct mlx5e_priv *priv = htb->priv;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(htb->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+ /* No fail point. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_htb_node_delete(htb, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(htb->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
+
+ node = mlx5e_htb_node_find(htb, *classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, *classid, err);
+
+ mlx5e_htb_node_delete(htb, node);
+
+ moved_qid = mlx5e_htb_cur_leaf_nodes(htb);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+ WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb->qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, moved_qid);
+
+ __set_bit(qid, htb->qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *classid = node->classid;
+ return 0;
+}
+
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, qid);
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_htb_node_delete(htb, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int
+mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(htb->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ node->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(htb->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_htb_update_children(htb, node, extack);
+
+ return err;
+}
+
+struct mlx5e_htb *mlx5e_htb_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
+}
+
+void mlx5e_htb_free(struct mlx5e_htb *htb)
+{
+ kvfree(htb);
+}
+
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv)
+{
+ htb->mdev = mdev;
+ htb->netdev = netdev;
+ htb->selq = selq;
+ htb->priv = priv;
+ hash_init(htb->qos_tc2node);
+ return mlx5e_htb_root_add(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->extack);
+}
+
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb)
+{
+ mlx5e_htb_root_del(htb);
+}
+
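mlx5e_htb_convert_rate() in the new file above expresses a class rate as a percentage of the nearest limited ancestor's ceiling: shares above 100% and unlimited parents map to 0, and a nonzero rate never maps below 1. A standalone version of just that math (BYTES_IN_MBIT = 125000 as in qos.c; the wrapper name is illustrative):

        #include <stdio.h>
        #include <stdint.h>

        #define BYTES_IN_MBIT 125000ULL   /* same constant the driver uses */

        /* rate is bytes/sec; parent_ceil_mbit is the nearest limited ancestor's
         * ceiling in Mbit/s (0 means no ancestor sets a ceiling). */
        static uint32_t rate_to_bw_share(uint64_t rate, uint64_t parent_ceil_mbit)
        {
                uint64_t share;

                if (parent_ceil_mbit)
                        share = (rate * 100 / BYTES_IN_MBIT) / parent_ceil_mbit;
                else
                        share = 101;    /* unlimited parent: falls into the >100 branch */

                return share == 0 ? 1 : share > 100 ? 0 : share;
        }

        int main(void)
        {
                /* 25 Mbit/s under a 100 Mbit/s ceiling -> 25 */
                printf("%u\n", rate_to_bw_share(25 * BYTES_IN_MBIT, 100));
                /* a tiny rate never becomes 0 */
                printf("%u\n", rate_to_bw_share(1000, 100));
                /* rate above the ceiling, or no ceiling at all, maps to 0 */
                printf("%u\n", rate_to_bw_share(200 * BYTES_IN_MBIT, 100));
                printf("%u\n", rate_to_bw_share(25 * BYTES_IN_MBIT, 0));
                return 0;
        }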
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
new file mode 100644
index 000000000000..8386f1ea4559
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_EN_HTB_H_
+#define __MLX5E_EN_HTB_H_
+
+#include "qos.h"
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_selq;
+struct mlx5e_htb;
+
+typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data);
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb);
+
+/* TX datapath API */
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid);
+
+/* HTB TC handlers */
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+struct mlx5e_htb *mlx5e_htb_alloc(void);
+void mlx5e_htb_free(struct mlx5e_htb *htb);
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv);
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb);
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3c1edfa33aa7..e025040350ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+
+ /* If XDP program is attached, XSK may be turned on at any time without
+ * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
+ * both regular RQ and XSK RQ.
+ * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
+ * doesn't affect its return value, as long as params->xdp_prog != NULL,
+ * so we can just multiply by 2.
+ */
+ if (params->xdp_prog)
+ wqebbs *= 2;
+
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 047f88f09203..903de88bab53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ struct skb_shared_hwtstamps hwts = {};
+ struct sk_buff *skb;
+
+ ptpsq->cq_stats->resync_event++;
+
+ while (skb_cc != skb_id) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+ skb_tstamp_tx(skb, &hwts);
+ ptpsq->cq_stats->resync_cqe++;
+ skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ }
+}
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct sk_buff *skb;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
ptpsq->cq_stats->err_cqe++;
goto out;
}
+ if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+ mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
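The resync path added above handles CQEs whose wqe_counter no longer matches the skb FIFO consumer index (for example after a lost timestamp CQE): it keeps popping skbs, completing them with whatever timestamp was already recorded in their cb, until the masked indices line up again, and only then consumes the skb that belongs to the current CQE. A toy model of that catch-up loop (plain counters instead of the real FIFO, mask chosen arbitrarily):

        #include <stdio.h>
        #include <stdint.h>

        #define CTR_MASK  0x7           /* arbitrary small stand-in for ts_cqe_ctr_mask */
        #define CTR2IDX(v) ((v) & CTR_MASK)

        int main(void)
        {
                uint16_t fifo_cc = 5;           /* consumer counter of the skb FIFO */
                uint16_t cqe_wqe_counter = 9;   /* counter reported by the CQE */
                uint16_t skb_id = CTR2IDX(cqe_wqe_counter);

                /* Catch-up loop: complete stale skbs until the indices agree again. */
                while (CTR2IDX(fifo_cc) != skb_id) {
                        printf("resync: completing stale skb at index %u\n", CTR2IDX(fifo_cc));
                        fifo_cc++;      /* the driver advances this via mlx5e_skb_fifo_pop() */
                }
                printf("in sync: skb index %u matches CQE counter %u\n",
                       CTR2IDX(fifo_cc), cqe_wqe_counter);
                return 0;
        }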
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+ struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
ptpsq->skb_fifo.mask = wq_sz - 1;
-
+ if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+ ptpsq->ts_cqe_ctr_mask =
+ (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
}
@@ -591,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!ptp_fs->valid)
return;
@@ -608,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5_flow_handle *rule;
int err;
@@ -775,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
if (!ptp_fs)
return -ENOMEM;
- priv->fs.ptp_fs = ptp_fs;
+ priv->fs->ptp_fs = ptp_fs;
return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index a71a32e00ebb..92dbbec472ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
u16 skb_fifo_pc;
struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_ptp_cq_stats *cq_stats;
+ u16 ts_cqe_ctr_mask;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 9db677e9ca9c..2842195ee548 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -2,11 +2,16 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
+#include "en/htb.h"
-#define BYTES_IN_MBIT 125000
+struct qos_sq_callback_params {
+ struct mlx5e_priv *priv;
+ struct mlx5e_channels *chs;
+};
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
@@ -28,124 +33,14 @@ int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
-{
- int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
-
- return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
-}
-
-/* Software representation of the QoS tree (internal to this file) */
-
-static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
-{
- int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
- int res;
-
- WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
- res = find_first_zero_bit(priv->htb.qos_used_qids, size);
-
- return res == size ? -ENOSPC : res;
-}
-
-struct mlx5e_qos_node {
- struct hlist_node hnode;
- struct mlx5e_qos_node *parent;
- u64 rate;
- u32 bw_share;
- u32 max_average_bw;
- u32 hw_id;
- u32 classid; /* 16-bit, except root. */
- u16 qid;
-};
-
-#define MLX5E_QOS_QID_INNER 0xffff
-#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
-
-static struct mlx5e_qos_node *
-mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
- struct mlx5e_qos_node *parent)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->parent = parent;
-
- node->qid = qid;
- __set_bit(qid, priv->htb.qos_used_qids);
-
- node->classid = classid;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
-
- mlx5e_update_tx_netdev_queues(priv);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->qid = MLX5E_QOS_QID_INNER;
- node->classid = MLX5E_HTB_CLASSID_ROOT;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
-{
- hash_del_rcu(&node->hnode);
- if (node->qid != MLX5E_QOS_QID_INNER) {
- __clear_bit(node->qid, priv->htb.qos_used_qids);
- mlx5e_update_tx_netdev_queues(priv);
- }
- /* Make sure this qid is no longer selected by mlx5e_select_queue, so
- * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
- */
- synchronize_net();
- kfree(node);
-}
-
/* TX datapath API */
-static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
/* These channel params are safe to access from the datapath, because:
- * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * 1. This function is called only after checking selq->htb_maj_id != 0,
* and the number of queues can't change while HTB offload is active.
- * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
* mlx5e_select_queue to finish while holding priv->state_lock,
* preventing other code from changing the number of queues.
*/
@@ -154,30 +49,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
-{
- struct mlx5e_qos_node *node;
- u16 qid;
- int res;
-
- rcu_read_lock();
-
- node = mlx5e_sw_node_find_rcu(priv, classid);
- if (!node) {
- res = -ENOENT;
- goto out;
- }
- qid = READ_ONCE(node->qid);
- if (qid == MLX5E_QOS_QID_INNER) {
- res = -EINVAL;
- goto out;
- }
- res = mlx5e_qid_from_qos(&priv->channels, qid);
-
-out:
- rcu_read_unlock();
- return res;
-}
+/* SQ lifecycle */
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
@@ -194,10 +66,8 @@ static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
return mlx5e_state_dereference(priv, qos_sqs[qid]);
}
-/* SQ lifecycle */
-
-static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
- struct mlx5e_qos_node *node)
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id)
{
struct mlx5e_create_cq_param ccp = {};
struct mlx5e_txqsq __rcu **qos_sqs;
@@ -210,13 +80,13 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
params = &chs->params;
- txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+ txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node->qid > priv->htb.max_qos_sqs);
- if (node->qid == priv->htb.max_qos_sqs) {
+ WARN_ON(node_qid > priv->htb_max_qos_sqs);
+ if (node_qid == priv->htb_max_qos_sqs) {
struct mlx5e_sq_stats *stats, **stats_list = NULL;
- if (priv->htb.max_qos_sqs == 0) {
+ if (priv->htb_max_qos_sqs == 0) {
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
sizeof(*stats_list),
GFP_KERNEL);
@@ -229,16 +99,16 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
return -ENOMEM;
}
if (stats_list)
- WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
- WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
- /* Order max_qos_sqs increment after writing the array pointer.
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ /* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
*/
- smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
}
- ix = node->qid % params->num_channels;
- qid = node->qid / params->num_channels;
+ ix = node_qid % params->num_channels;
+ qid = node_qid / params->num_channels;
c = chs->c[ix];
qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
@@ -257,8 +127,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id,
- priv->htb.qos_sq_stats[node->qid]);
+ &param_sq, sq, 0, hw_id,
+ priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
@@ -273,14 +143,22 @@ err_free_sq:
return err;
}
-static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
+{
+ struct qos_sq_callback_params *cb_params = data;
+
+ return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
+}
+
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
{
+ struct mlx5e_priv *priv = data;
struct mlx5e_txqsq *sq;
u16 qid;
- sq = mlx5e_get_qos_sq(priv, node->qid);
+ sq = mlx5e_get_qos_sq(priv, node_qid);
- qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+ qid = mlx5e_qid_from_qos(&priv->channels, node_qid);
/* If it's a new queue, it will be marked as started at this point.
* Stop it before updating txq2sq.
@@ -295,11 +173,13 @@ static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
+
+ return 0;
}
-static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
@@ -319,7 +199,7 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
smp_wmb();
}
-static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_params *params;
@@ -369,7 +249,7 @@ void mlx5e_qos_close_queues(struct mlx5e_channel *c)
kvfree(qos_sqs);
}
-static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -377,7 +257,7 @@ static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_close_queues(chs->c[i]);
}
-static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
u16 qos_sqs_size;
int i;
@@ -413,24 +293,20 @@ err_free:
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt, err;
-
- if (!priv->htb.maj_id)
- return 0;
+ struct qos_sq_callback_params callback_params;
+ int err;
err = mlx5e_qos_alloc_queues(priv, chs);
if (err)
return err;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- err = mlx5e_open_qos_sq(priv, chs, node);
- if (err) {
- mlx5e_qos_close_all_queues(chs);
- return err;
- }
+ callback_params.priv = priv;
+ callback_params.chs = chs;
+
+ err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
}
return 0;
@@ -438,14 +314,7 @@ int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- mlx5e_activate_qos_sq(priv, node);
- }
+ mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
@@ -474,7 +343,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
}
}
-static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -482,293 +351,14 @@ static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_deactivate_queues(chs->c[i]);
}
-/* HTB API */
-
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *root;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
-
- if (!mlx5_qos_is_supported(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
- return -EOPNOTSUPP;
- }
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
-
- err = mlx5e_qos_alloc_queues(priv, &priv->channels);
- if (err)
- goto err_cancel_selq;
- }
-
- root = mlx5e_sw_node_create_root(priv);
- if (IS_ERR(root)) {
- err = PTR_ERR(root);
- goto err_free_queues;
- }
-
- err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
- goto err_sw_node_delete;
- }
-
- WRITE_ONCE(priv->htb.defcls, htb_defcls);
- /* Order maj_id after defcls - pairs with
- * mlx5e_select_queue/mlx5e_select_htb_queues.
- */
- smp_store_release(&priv->htb.maj_id, htb_maj_id);
-
- if (opened)
- mlx5e_selq_apply(&priv->selq);
-
- return 0;
-
-err_sw_node_delete:
- mlx5e_sw_node_delete(priv, root);
-
-err_free_queues:
- if (opened)
- mlx5e_qos_close_all_queues(&priv->channels);
-err_cancel_selq:
- mlx5e_selq_cancel(&priv->selq);
- return err;
-}
-
-int mlx5e_htb_root_del(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *root;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
-
- /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
- * so that we can safely switch to its non-HTB non-PTP fastpath.
- */
- synchronize_net();
-
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
- mlx5e_selq_apply(&priv->selq);
-
- WRITE_ONCE(priv->htb.maj_id, 0);
-
- root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
- if (!root) {
- qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
- return -ENOENT;
- }
- err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
- if (err)
- qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
- root->hw_id, err);
- mlx5e_sw_node_delete(priv, root);
-
- mlx5e_qos_deactivate_all_queues(&priv->channels);
- mlx5e_qos_close_all_queues(&priv->channels);
-
- return err;
-}
-
-static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
- struct mlx5e_qos_node *parent, u32 *bw_share)
-{
- u64 share = 0;
-
- while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
- parent = parent->parent;
-
- if (parent->max_average_bw)
- share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
- parent->max_average_bw);
- else
- share = 101;
-
- *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
-
- qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
- rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
-
- return 0;
-}
-
-static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
-{
- /* Hardware treats 0 as "unlimited", set at least 1. */
- *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
-
- qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
- ceil, *max_average_bw);
-}
-
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- int qid;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
- classid, parent_classid, rate, ceil);
-
- qid = mlx5e_find_unused_qos_qid(priv);
- if (qid < 0) {
- NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
- return qid;
- }
-
- parent = mlx5e_sw_node_find(priv, parent_classid);
- if (!parent)
- return -EINVAL;
-
- node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- mlx5e_sw_node_delete(priv, node);
- return err;
- }
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- return mlx5e_qid_from_qos(&priv->channels, node->qid);
-}
-
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *child;
- int err, tmp_err;
- u32 new_hw_id;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
- classid, child_classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
- qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- /* Intentionally reuse the qid for the upcoming first child. */
- child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
- if (IS_ERR(child)) {
- err = PTR_ERR(child);
- goto err_destroy_hw_node;
- }
-
- child->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
- child->max_average_bw, &child->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- goto err_delete_sw_node;
- }
-
- /* No fail point. */
-
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, child);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, child);
- }
- }
-
- return 0;
-
-err_delete_sw_node:
- child->qid = MLX5E_QOS_QID_INNER;
- mlx5e_sw_node_delete(priv, child);
-
-err_destroy_hw_node:
- tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
- if (tmp_err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
- new_hw_id, classid, tmp_err);
- return err;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
-{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
- if (node->qid == qid)
- break;
-
- return node;
-}
-
-static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
netdev_tx_reset_queue(txq);
netif_tx_start_queue(txq);
}
-static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
@@ -781,251 +371,65 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
spin_unlock_bh(qdisc_lock(qdisc));
}
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack)
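+/* Single entry point for tc HTB offload: dispatches TC_HTB_* commands to the
+ * mlx5e_htb instance owned by priv, which is allocated on TC_HTB_CREATE and
+ * freed on TC_HTB_DESTROY.
+ */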
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
- struct mlx5e_qos_node *node;
- struct netdev_queue *txq;
- u16 qid, moved_qid;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
-
- node = mlx5e_sw_node_find(priv, *classid);
- if (!node)
- return -ENOENT;
-
- /* Store qid for reuse. */
- qid = node->qid;
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, qid));
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, *classid, err);
-
- mlx5e_sw_node_delete(priv, node);
-
- moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
-
- if (moved_qid == 0) {
- /* The last QoS SQ was just destroyed. */
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
- moved_qid--;
-
- if (moved_qid < qid) {
- /* The highest QoS SQ was just destroyed. */
- WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
- qid, moved_qid);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
-
- WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
- qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
-
- node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
- WARN(!node, "Could not find a node with qid %u to move to queue %u",
- moved_qid, qid);
-
- /* Stop traffic to the old queue. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
- __clear_bit(moved_qid, priv->htb.qos_used_qids);
-
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, moved_qid));
- mlx5e_deactivate_qos_sq(priv, moved_qid);
- mlx5e_close_qos_sq(priv, moved_qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, moved_qid);
-
- __set_bit(qid, priv->htb.qos_used_qids);
- WRITE_ONCE(node->qid, qid);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
- node->classid, moved_qid, qid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- mlx5e_update_tx_netdev_queues(priv);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
-
- *classid = node->classid;
- return 0;
-}
-
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- u32 old_hw_id, new_hw_id;
- int err, saved_err = 0;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
- force ? "_FORCE" : "", classid);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
- node->parent->bw_share,
- node->parent->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- if (!force)
- return err;
- saved_err = err;
- }
-
- /* Store qid for reuse and prevent clearing the bit. */
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, qid);
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- parent = node->parent;
- mlx5e_sw_node_delete(priv, node);
+ struct mlx5e_htb *htb = priv->htb;
+ int res;
- node = parent;
- WRITE_ONCE(node->qid, qid);
+ if (!htb && htb_qopt->command != TC_HTB_CREATE)
+ return -EINVAL;
- /* Early return on error in force mode. Parent will still be an inner
- * node to be deleted by a following delete operation.
- */
- if (saved_err)
- return saved_err;
-
- old_hw_id = node->hw_id;
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
+ switch (htb_qopt->command) {
+ case TC_HTB_CREATE:
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(htb_qopt->extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
}
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- return 0;
-}
-
-static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *child;
- int err = 0;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
- u32 old_bw_share = child->bw_share;
- int err_one;
-
- if (child->parent != node)
- continue;
-
- mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
- if (child->bw_share == old_bw_share)
- continue;
-
- err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
- child->max_average_bw, child->hw_id);
- if (!err && err_one) {
- err = err_one;
-
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
- qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
- node->classid, err);
+ priv->htb = mlx5e_htb_alloc();
+ htb = priv->htb;
+ if (!htb)
+ return -ENOMEM;
+ res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
+ if (res) {
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
}
+ return res;
+ case TC_HTB_DESTROY:
+ mlx5e_htb_cleanup(htb);
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
+ return 0;
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
+ htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb_qopt->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
+ htb_qopt->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-
- return err;
-}
-
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- u32 bw_share, max_average_bw;
- struct mlx5e_qos_node *node;
- bool ceil_changed = false;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
- classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
-
- err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
- max_average_bw, node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
- qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- if (max_average_bw != node->max_average_bw)
- ceil_changed = true;
-
- node->bw_share = bw_share;
- node->max_average_bw = max_average_bw;
-
- if (ceil_changed)
- err = mlx5e_qos_update_children(priv, node, extack);
-
- return err;
}
struct mlx5e_mqprio_rl {
@@ -1111,3 +515,4 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_i
*hw_id = rl->leaves_id[tc];
return 0;
}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 5d9bd91d86c2..4947afa23b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -6,40 +6,39 @@
#include <linux/mlx5/driver.h>
-#define MLX5E_QOS_MAX_LEAF_NODES 256
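+/* Bytes per second in 1 Mbit/s: 10^6 bits / 8 = 125000 bytes */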
+#define BYTES_IN_MBIT 125000
struct mlx5e_priv;
+struct mlx5e_htb;
struct mlx5e_channels;
struct mlx5e_channel;
+struct tc_htb_qopt_offload;
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
-
-/* TX datapath API */
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
/* SQ lifecycle */
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id);
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id);
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
+
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs);
void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs);
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+/* TX datapath API */
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
/* HTB API */
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_root_del(struct mlx5e_priv *priv);
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb);
/* MQPRIO TX rate limit */
struct mlx5e_mqprio_rl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 48dc121b2cb4..39ef2a2561a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -269,6 +269,12 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
attr->u.vlan_filtering, br_offloads);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlx5_esw_bridge_vlan_proto_set(vport_num,
+ esw_owner_vhca_id,
+ attr->u.vlan_protocol,
+ br_offloads);
+ break;
default:
err = -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 86fa0bdbee36..fac7e3ff2674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -21,6 +21,7 @@
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
+#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
}
+static int
+mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct flow_action_entry *action;
+ struct mlx5e_tc_act *act;
+ bool add = false;
+ int i;
+
+ /* There is currently no use case for more than one action (e.g. pedit).
+ * When there is, cleanup of multiple actions on error will need to be handled.
+ */
+ if (!flow_offload_has_one_action(&fl_act->action))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ flow_action_for_each(i, action, &fl_act->action) {
+ act = mlx5e_tc_act_get(action->id, ns_type);
+ if (!act)
+ continue;
+
+ if (!act->offload_action)
+ continue;
+
+ if (!act->offload_action(priv, fl_act, action))
+ add = true;
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->destroy_action)
+ return -EOPNOTSUPP;
+
+ return act->destroy_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->stats_action)
+ return -EOPNOTSUPP;
+
+ return act->stats_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return mlx5e_rep_indr_replace_act(rpriv, fl_act);
+ case FLOW_ACT_DESTROY:
+ return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
+ case FLOW_ACT_STATS:
+ return mlx5e_rep_indr_stats_act(rpriv, fl_act);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
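+/* Indirect callbacks that arrive without a netdev can only be tc action
+ * offload requests (TC_SETUP_ACT), since actions are not bound to a specific
+ * device; anything else is rejected.
+ */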
+static int
+mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
+ enum tc_setup_type type,
+ void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return mlx5e_rep_indr_setup_act(rpriv, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);
switch (type) {
case TC_SETUP_BLOCK:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index d98a277eb7f8..f675b1926340 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -7,6 +7,7 @@
#include <linux/rcupdate.h>
#include "en.h"
#include "en/ptp.h"
+#include "en/htb.h"
struct mlx5e_selq_params {
unsigned int num_regular_queues;
@@ -19,6 +20,8 @@ struct mlx5e_selq_params {
bool is_ptp : 1;
};
};
+ u16 htb_maj_id;
+ u16 htb_defcls;
};
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
@@ -44,6 +47,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
.num_tcs = 1,
.is_htb = false,
.is_ptp = false,
+ .htb_maj_id = 0,
+ .htb_defcls = 0,
};
rcu_assign_pointer(selq->active, init_params);
@@ -64,21 +69,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
selq->standby = NULL;
}
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
{
+ struct mlx5e_selq_params *selq_active;
+
lockdep_assert_held(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
selq->is_prepared = true;
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
selq->standby->num_channels = params->num_channels;
selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
selq->standby->num_regular_queues =
selq->standby->num_channels * selq->standby->num_tcs;
- selq->standby->is_htb = htb;
selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
}
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *selq_active =
+ rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+ return selq_active->htb_maj_id;
+}
+
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+ struct mlx5e_selq_params *selq_active;
+
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
+ selq->standby->is_htb = htb_maj_id;
+ selq->standby->htb_maj_id = htb_maj_id;
+ selq->standby->htb_defcls = htb_defcls;
+}
+
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
struct mlx5e_selq_params *old_params;
@@ -137,20 +171,21 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
return selq->num_regular_queues + up;
}
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
{
u16 classid;
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+ if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
classid = TC_H_MIN(skb->priority);
else
- classid = READ_ONCE(priv->htb.defcls);
+ classid = selq->htb_defcls;
if (!classid)
return 0;
- return mlx5e_get_txq_by_classid(priv, classid);
+ return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
}
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -187,10 +222,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
up * selq->num_channels;
}
- if (unlikely(selq->is_htb)) {
+ if (unlikely(selq->htb_maj_id)) {
/* num_tcs == 1, shortcut for PTP */
- txq_ix = mlx5e_select_htb_queue(priv, skb);
+ txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
if (txq_ix > 0)
return txq_ix;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
index 6c070141d8f1..fd590f80e4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -21,7 +21,9 @@ struct sk_buff;
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
void mlx5e_selq_apply(struct mlx5e_selq *selq);
void mlx5e_selq_cancel(struct mlx5e_selq *selq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 2755c25ba324..305fde62a78d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -30,7 +30,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
&mlx5e_tc_act_sample,
- NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_police,
&mlx5e_tc_act_ct,
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
@@ -106,8 +106,8 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
- parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
+ parse_state->flow_action = flow_action;
}
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index f34714c5ddd4..e1570ff056ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -13,7 +13,7 @@
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
- unsigned int num_actions;
+ struct flow_action *flow_action;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
@@ -50,6 +50,16 @@ struct mlx5e_tc_act {
bool (*is_multi_table_act)(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
+
+ int (*offload_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act);
+
+ int (*destroy_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
+
+ int (*stats_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
};
struct mlx5e_tc_flow_action {
@@ -76,6 +86,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+extern struct mlx5e_tc_act mlx5e_tc_act_police;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 4726bcb46eec..69949ab830b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
new file mode 100644
index 000000000000..37522352e4b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return false;
+ }
+ if (mlx5e_policer_validate(parse_state->flow_action, act,
+ parse_state->extack))
+ return false;
+
+ return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
+}
+
+static int
+fill_meter_params_from_act(const struct flow_action_entry *act,
+ struct mlx5e_flow_meter_params *params)
+{
+ params->index = act->hw_index;
+ if (act->police.rate_bytes_ps) {
+ params->mode = MLX5_RATE_LIMIT_BPS;
+ /* change rate to bits per second */
+ params->rate = act->police.rate_bytes_ps << 3;
+ params->burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps) {
+ params->mode = MLX5_RATE_LIMIT_PPS;
+ params->rate = act->police.rate_pkt_ps;
+ params->burst = act->police.burst_pkt;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+
+ return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return true;
+}
+
+static int
+tc_act_police_offload(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ int err = 0;
+
+ err = fill_meter_params_from_act(act, &params);
+ if (err)
+ return err;
+
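+ /* Look up the meter by the police action index; create it if it does not
+ * exist yet, otherwise update its params and drop the reference taken by
+ * the get.
+ */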
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) {
+ meter = mlx5e_tc_meter_replace(priv->mdev, &params);
+ } else if (!IS_ERR(meter)) {
+ err = mlx5e_tc_meter_update(meter, &params);
+ mlx5e_tc_meter_put(meter);
+ }
+
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ err = PTR_ERR(meter);
+ }
+
+ return err;
+}
+
+static int
+tc_act_police_destroy(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+ /* One put pairs with the get above; the second drops the action's own reference so the meter is freed. */
+ mlx5e_tc_meter_put(meter);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+static int
+tc_act_police_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ u64 bytes, packets, drops, lastuse;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+
+ mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse);
+ flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_police = {
+ .can_offload = tc_act_can_offload_police,
+ .parse_action = tc_act_parse_police,
+ .is_multi_table_act = tc_act_is_multi_table_act_police,
+ .offload_action = tc_act_police_offload,
+ .destroy_action = tc_act_police_destroy,
+ .stats_action = tc_act_police_stats,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index a7d9eab19e4a..53b270f652b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -12,7 +12,7 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
{
struct netlink_ext_ack *extack = parse_state->extack;
- if (parse_state->num_actions != 1) {
+ if (parse_state->flow_action->num_entries != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
new file mode 100644
index 000000000000..a53e205f4a89
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/math64.h>
+#include "lib/aso.h"
+#include "en/tc/post_act.h"
+#include "meter.h"
+#include "en/tc_priv.h"
+
+#define MLX5_START_COLOR_SHIFT 28
+#define MLX5_METER_MODE_SHIFT 24
+#define MLX5_CBS_EXP_SHIFT 24
+#define MLX5_CBS_MAN_SHIFT 16
+#define MLX5_CIR_EXP_SHIFT 8
+
+/* cir = 8*(10^9)*cir_mantissa/(2^cir_exponent) bits/s */
+#define MLX5_CONST_CIR 8000000000ULL
+#define MLX5_CALC_CIR(m, e) ((MLX5_CONST_CIR * (m)) >> (e))
+#define MLX5_MAX_CIR ((MLX5_CONST_CIR * 0x100) - 1)
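+/* e.g. cir_mantissa = 16, cir_exponent = 7 encodes (8*10^9 * 16) >> 7 = 10^9 bits/s (1 Gbit/s) */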
+
+/* cbs = cbs_mantissa*2^cbs_exponent */
+#define MLX5_CALC_CBS(m, e) ((m) << (e))
+#define MLX5_MAX_CBS ((0x100ULL << 0x1F) - 1)
+#define MLX5_MAX_HW_CBS 0x7FFFFFFF
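+/* e.g. cbs_mantissa = 64, cbs_exponent = 14 encodes 64 << 14 = 1048576 bytes */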
+
+struct mlx5e_flow_meter_aso_obj {
+ struct list_head entry;
+ int base_id;
+ int total_meters;
+
+ unsigned long meters_map[0]; /* must be at the end of this struct */
+};
+
+struct mlx5e_flow_meters {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_aso *aso;
+ struct mutex aso_lock; /* Protects aso operations */
+ int log_granularity;
+ u32 pdn;
+
+ DECLARE_HASHTABLE(hashtbl, 8);
+
+ struct mutex sync_lock; /* protect flow meter operations */
+ struct list_head partial_list;
+ struct list_head full_list;
+
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_post_act *post_act;
+};
+
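+/* Pick the (mantissa, exponent) pair whose MLX5_CALC_CIR() value is closest
+ * to the requested rate: try each 5-bit exponent and keep the best fitting
+ * 8-bit mantissa.
+ */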
+static void
+mlx5e_flow_meter_cir_calc(u64 cir, u8 *man, u8 *exp)
+{
+ s64 _cir, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cir << e;
+ if ((s64)m < 0) /* overflow */
+ break;
+ m = div64_u64(m, MLX5_CONST_CIR);
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cir = MLX5_CALC_CIR(m, e);
+ _delta = cir - _cir;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+static void
+mlx5e_flow_meter_cbs_calc(u64 cbs, u8 *man, u8 *exp)
+{
+ s64 _cbs, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cbs >> e;
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cbs = MLX5_CALC_CBS(m, e);
+ _delta = cbs - _cbs;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params)
+{
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl;
+ struct mlx5_wqe_aso_data_seg *aso_data;
+ struct mlx5e_flow_meters *flow_meters;
+ u8 cir_man, cir_exp, cbs_man, cbs_exp;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *aso;
+ u64 rate, burst;
+ u8 ds_cnt;
+ int err;
+
+ rate = meter_params->rate;
+ burst = meter_params->burst;
+
+ /* HW treats each packet as 128 bytes in PPS mode */
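+ /* i.e. rate: pkt/s -> bits/s (<< 10 == * 128 bytes * 8 bits), burst: packets -> bytes (<< 7 == * 128) */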
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS) {
+ rate <<= 10;
+ burst <<= 7;
+ }
+
+ if (!rate || rate > MLX5_MAX_CIR || !burst || burst > MLX5_MAX_CBS)
+ return -EINVAL;
+
+ /* HW has limitation of total 31 bits for cbs */
+ if (burst > MLX5_MAX_HW_CBS) {
+ mlx5_core_warn(mdev,
+ "burst(%lld) is too large, use HW allowed value(%d)\n",
+ burst, MLX5_MAX_HW_CBS);
+ burst = MLX5_MAX_HW_CBS;
+ }
+
+ mlx5_core_dbg(mdev, "meter mode=%d\n", meter_params->mode);
+ mlx5e_flow_meter_cir_calc(rate, &cir_man, &cir_exp);
+ mlx5_core_dbg(mdev, "rate=%lld, cir=%lld, exp=%d, man=%d\n",
+ rate, MLX5_CALC_CIR(cir_man, cir_exp), cir_exp, cir_man);
+ mlx5e_flow_meter_cbs_calc(burst, &cbs_man, &cbs_exp);
+ mlx5_core_dbg(mdev, "burst=%lld, cbs=%lld, exp=%d, man=%d\n",
+ burst, MLX5_CALC_CBS((u64)cbs_man, cbs_exp), cbs_exp, cbs_man);
+
+ if (!cir_man || !cbs_man)
+ return -EINVAL;
+
+ flow_meters = meter->flow_meters;
+ aso = flow_meters->aso;
+
+ mutex_lock(&flow_meters->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso, ds_cnt, aso_wqe, meter->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
+
+ aso_ctrl = &aso_wqe->aso_ctrl;
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
+ aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+ MLX5_ASO_ALWAYS_TRUE << 4;
+ aso_ctrl->data_offset_condition_operand = MLX5_ASO_LOGICAL_OR << 6;
+ aso_ctrl->data_mask = cpu_to_be64(0x80FFFFFFULL << (meter->idx ? 0 : 32));
+
+ aso_data = (struct mlx5_wqe_aso_data_seg *)(aso_wqe + 1);
+ memset(aso_data, 0, sizeof(*aso_data));
+ aso_data->bytewise_data[meter->idx * 8] = cpu_to_be32((0x1 << 31) | /* valid */
+ (MLX5_FLOW_METER_COLOR_GREEN << MLX5_START_COLOR_SHIFT));
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS)
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_NUM_PACKETS << MLX5_METER_MODE_SHIFT);
+ else
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH << MLX5_METER_MODE_SHIFT);
+
+ aso_data->bytewise_data[meter->idx * 8 + 2] = cpu_to_be32((cbs_exp << MLX5_CBS_EXP_SHIFT) |
+ (cbs_man << MLX5_CBS_MAN_SHIFT) |
+ (cir_exp << MLX5_CIR_EXP_SHIFT) |
+ cir_man);
+
+ mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
+
+ /* With newer FW, the wait for the first ASO WQE can exceed 2us, so allow up to 10ms. */
+ err = mlx5_aso_poll_cq(aso, true, 10);
+ mutex_unlock(&flow_meters->aso_lock);
+
+ return err;
+}
+
+static int
+mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ void *obj;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+
+ obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
+ MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err) {
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) created\n", *obj_id);
+ }
+
+ return err;
+}
+
+static void
+mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) destroyed\n", obj_id);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+{
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5_fc *counter;
+ int err, pos, total;
+ u32 id;
+
+ meter = kzalloc(sizeof(*meter), GFP_KERNEL);
+ if (!meter)
+ return ERR_PTR(-ENOMEM);
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_red_counter;
+ }
+ meter->red_counter = counter;
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_green_counter;
+ }
+ meter->green_counter = counter;
+
+ meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
+ struct mlx5e_flow_meter_aso_obj,
+ entry);
+ /* 2 meters in one object */
+ total = 1 << (flow_meters->log_granularity + 1);
+ if (!meters_obj) {
+ err = mlx5e_flow_meter_create_aso_obj(flow_meters, &id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create flow meter ASO object\n");
+ goto err_create;
+ }
+
+ meters_obj = kzalloc(sizeof(*meters_obj) + BITS_TO_BYTES(total),
+ GFP_KERNEL);
+ if (!meters_obj) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ meters_obj->base_id = id;
+ meters_obj->total_meters = total;
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ pos = 0;
+ } else {
+ pos = find_first_zero_bit(meters_obj->meters_map, total);
+ if (bitmap_weight(meters_obj->meters_map, total) == total - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->full_list);
+ }
+ }
+
+ bitmap_set(meters_obj->meters_map, pos, 1);
+ meter->flow_meters = flow_meters;
+ meter->meters_obj = meters_obj;
+ meter->obj_id = meters_obj->base_id + pos / 2;
+ meter->idx = pos % 2;
+
+ mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+
+ return meter;
+
+err_mem:
+ mlx5e_flow_meter_destroy_aso_obj(mdev, id);
+err_create:
+ mlx5_fc_destroy(mdev, meter->green_counter);
+err_green_counter:
+ mlx5_fc_destroy(mdev, meter->red_counter);
+err_red_counter:
+ kfree(meter);
+ return ERR_PTR(err);
+}
+
+static void
+__mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ int n, pos;
+
+ mlx5_fc_destroy(mdev, meter->green_counter);
+ mlx5_fc_destroy(mdev, meter->red_counter);
+
+ meters_obj = meter->meters_obj;
+ pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
+ bitmap_clear(meters_obj->meters_map, pos, 1);
+ n = bitmap_weight(meters_obj->meters_map, meters_obj->total_meters);
+ if (n == 0) {
+ list_del(&meters_obj->entry);
+ mlx5e_flow_meter_destroy_aso_obj(mdev, meters_obj->base_id);
+ kfree(meters_obj);
+ } else if (n == meters_obj->total_meters - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ }
+
+ mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+ kfree(meter);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index)
+ if (meter->params.index == index)
+ goto add_ref;
+
+ return ERR_PTR(-ENOENT);
+
+add_ref:
+ meter->refcnt++;
+
+ return meter;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ mutex_unlock(&flow_meters->sync_lock);
+
+ return meter;
+}
+
+static void
+__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
+ }
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+ mutex_lock(&flow_meters->sync_lock);
+ __mlx5e_tc_meter_put(meter);
+ mutex_unlock(&flow_meters->sync_lock);
+}
+
+static struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = __mlx5e_flow_meter_alloc(flow_meters);
+ if (IS_ERR(meter))
+ return meter;
+
+ hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
+ meter->params.index = params->index;
+ meter->refcnt++;
+
+ return meter;
+}
+
+static int
+__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ int err = 0;
+
+ if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
+ meter->params.burst != params->burst) {
+ err = mlx5e_tc_meter_modify(mdev, meter, params);
+ if (err)
+ goto out;
+
+ meter->params.mode = params->mode;
+ meter->params.rate = params->rate;
+ meter->params.burst = params->burst;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_meters->sync_lock);
+ err = __mlx5e_tc_meter_update(meter, params);
+ mutex_unlock(&flow_meters->sync_lock);
+ return err;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ if (IS_ERR(meter)) {
+ meter = mlx5e_tc_meter_alloc(flow_meters, params);
+ if (IS_ERR(meter)) {
+ err = PTR_ERR(meter);
+ goto err_get;
+ }
+ }
+
+ err = __mlx5e_tc_meter_update(meter, params);
+ if (err)
+ goto err_update;
+
+ mutex_unlock(&flow_meters->sync_lock);
+ return meter;
+
+err_update:
+ __mlx5e_tc_meter_put(meter);
+err_get:
+ mutex_unlock(&flow_meters->sync_lock);
+ return ERR_PTR(err);
+}
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters)
+{
+ return flow_meters->ns_type;
+}
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (IS_ERR_OR_NULL(post_act)) {
+ netdev_dbg(priv->netdev,
+ "flow meter offload is not supported, post action is missing\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ flow_meters = kzalloc(sizeof(*flow_meters), GFP_KERNEL);
+ if (!flow_meters)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_alloc_pd(mdev, &flow_meters->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc pd for flow meter aso, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_meters->aso = mlx5_aso_create(mdev, flow_meters->pdn);
+ if (IS_ERR(flow_meters->aso)) {
+ mlx5_core_warn(mdev, "Failed to create aso wqe for flow meter\n");
+ err = PTR_ERR(flow_meters->aso);
+ goto err_sq;
+ }
+
+ mutex_init(&flow_meters->sync_lock);
+ INIT_LIST_HEAD(&flow_meters->partial_list);
+ INIT_LIST_HEAD(&flow_meters->full_list);
+
+ flow_meters->ns_type = ns_type;
+ flow_meters->mdev = mdev;
+ flow_meters->post_act = post_act;
+ mutex_init(&flow_meters->aso_lock);
+ flow_meters->log_granularity = min_t(int, 6,
+ MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));
+
+ return flow_meters;
+
+err_sq:
+ mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
+err_out:
+ kfree(flow_meters);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
+{
+ if (IS_ERR_OR_NULL(flow_meters))
+ return;
+
+ mlx5_aso_destroy(flow_meters->aso);
+ mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
+ kfree(flow_meters);
+}
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse)
+{
+ u64 bytes1, packets1, lastuse1;
+ u64 bytes2, packets2, lastuse2;
+
+ mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+
+ *bytes = bytes1 + bytes2;
+ *packets = packets1 + packets2;
+ *drops = packets2;
+ *lastuse = max_t(u64, lastuse1, lastuse2);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
new file mode 100644
index 000000000000..6de6e8a16327
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_FLOW_METER_H__
+#define __MLX5_EN_FLOW_METER_H__
+
+struct mlx5e_post_meter_priv;
+struct mlx5e_flow_meter_aso_obj;
+struct mlx5e_flow_meters;
+struct mlx5_flow_attr;
+
+enum mlx5e_flow_meter_mode {
+ MLX5_RATE_LIMIT_BPS,
+ MLX5_RATE_LIMIT_PPS,
+};
+
+struct mlx5e_flow_meter_params {
+ enum mlx5e_flow_meter_mode mode;
+ /* police action index */
+ u32 index;
+ u64 rate;
+ u64 burst;
+};
+
+struct mlx5e_flow_meter_handle {
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ u32 obj_id;
+ u8 idx;
+
+ int refcnt;
+ struct hlist_node hlist;
+ struct mlx5e_flow_meter_params params;
+
+ struct mlx5_fc *green_counter;
+ struct mlx5_fc *red_counter;
+};
+
+struct mlx5e_meter_attr {
+ struct mlx5e_flow_meter_params params;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5e_post_meter_priv *post_meter;
+};
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params);
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params);
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters);
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_action);
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+
+#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index dea137dd744b..4e48946c4c2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -22,9 +22,9 @@ struct mlx5e_post_act_handle {
u32 id;
};
-#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
-#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0)
-#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX
+#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
@@ -36,7 +36,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
int err;
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
@@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
new file mode 100644
index 000000000000..8b77e822810e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "en/tc_priv.h"
+#include "post_meter.h"
+#include "en/tc/post_act.h"
+
+#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
+#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
+
+struct mlx5e_post_meter_priv {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *fwd_green_rule;
+ struct mlx5_flow_handle *drop_red_rule;
+};
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->ft;
+}
+
+static int
+mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+
+ root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
+ if (!root_ns) {
+ mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(post_meter->ft)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ return PTR_ERR(post_meter->ft);
+ }
+
+ return 0;
+}
+
+static int
+mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *misc2, *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc2 = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_5, MLX5_PACKET_COLOR_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
+ if (IS_ERR(post_meter->fg)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
+ err = PTR_ERR(post_meter->fg);
+ }
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter_id = mlx5_fc_id(red_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ err = PTR_ERR(rule);
+ goto err_red;
+ }
+ post_meter->drop_red_rule = rule;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(green_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ err = PTR_ERR(rule);
+ goto err_green;
+ }
+ post_meter->fwd_green_rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_green:
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+err_red:
+ kvfree(spec);
+ return err;
+}
+
+static void
+mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(post_meter->fwd_green_rule);
+}
+
+static void
+mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_group(post_meter->fg);
+}
+
+static void
+mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_table(post_meter->ft);
+}
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5e_post_meter_priv *post_meter;
+ int err;
+
+ post_meter = kzalloc(sizeof(*post_meter), GFP_KERNEL);
+ if (!post_meter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
+ if (err)
+ goto err_ft;
+
+ err = mlx5e_post_meter_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
+ red_counter);
+ if (err)
+ goto err_rules;
+
+ return post_meter;
+
+err_rules:
+ mlx5e_post_meter_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_table_destroy(post_meter);
+err_ft:
+ kfree(post_meter);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rules_destroy(post_meter);
+ mlx5e_post_meter_fg_destroy(post_meter);
+ mlx5e_post_meter_table_destroy(post_meter);
+ kfree(post_meter);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
new file mode 100644
index 000000000000..34d0e4b9fc7a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_POST_METER_H__
+#define __MLX5_EN_POST_METER_H__
+
+#define packet_color_to_reg { \
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5, \
+ .moffset = 0, \
+ .mlen = 8, \
+ .soffset = MLX5_BYTE_OFF(fte_match_param, \
+ misc_parameters_2.metadata_reg_c_5), \
+}
+
+struct mlx5e_post_meter_priv;
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter);
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+
+#endif /* __MLX5_EN_POST_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index ba171c7f0a67..864ce0c393e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -36,8 +36,8 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
-#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
+#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
/* Statically allocate modify actions for
* ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
@@ -2062,7 +2062,7 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
/* Ignore_flow_level support isn't supported by default for VFs and so post_act
* won't be supported. Skip showing error msg.
*/
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
err_msg = "post action is missing";
err = -EOPNOTSUPP;
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 00a3ba862afb..5bbd6b92840f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -62,10 +62,11 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_4),\
}
+/* The 8 LSBs of metadata C5 are reserved for the packet color */
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
- .moffset = 0,\
- .mlen = 32,\
+ .moffset = 8,\
+ .mlen = 24,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -84,10 +85,8 @@ struct mlx5_ct_attr {
.mlen = ESW_ZONE_ID_BITS,\
}
-#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
-#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_ZONE_BITS MLX5_REG_MAPPING_MBITS(ZONE_TO_REG)
+#define MLX5_CT_ZONE_MASK MLX5_REG_MAPPING_MASK(ZONE_TO_REG)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 3b74a6fd5c43..10c9a8a79d00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -11,7 +11,6 @@
#define MLX5E_TC_MAX_SPLITS 1
-#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
@@ -44,6 +43,8 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_act_parse_state parse_state;
};
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
+
/* Helper struct for accessing a struct containing list_head array.
* Containing struct
* |- Helper array
@@ -203,7 +204,13 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
+
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index a8cfab4a393c..cc18d97d8ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -7,6 +7,8 @@
#include "en.h"
#include <net/xdp_sock_drv.h>
+#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
+
/* RX data path */
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
@@ -21,6 +23,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
+retry:
dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
@@ -32,6 +35,17 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
*/
dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
+ /* MTT page mapping has alignment requirements. If they are not
+ * satisfied, leak the descriptor so that it won't be handed out
+ * again, and try to allocate a new one.
+ */
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
+ xsk_buff_discard(dma_info->xsk);
+ goto retry;
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 04c0a5e1c89a..1839f1ab1ddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -194,4 +194,14 @@ static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
}
+
+static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
+{
+ return mlx5e_ktls_init_tx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mlx5e_ktls_cleanup_tx(priv);
+}
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 3ae6067c7e6b..20a4f1e585af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -267,7 +267,7 @@ out:
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs.accel_tcp->tables[i].t;
+ dest.ft = priv->fs->accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
{
struct mlx5e_accel_fs_tcp *fs_tcp;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
{
int i;
- if (!priv->fs.accel_tcp)
+ if (!priv->fs->accel_tcp)
return;
accel_fs_tcp_disable(priv);
@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
}
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
@@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
- if (!priv->fs.accel_tcp)
+ priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
+ if (!priv->fs->accel_tcp)
return -ENOMEM;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
@@ -396,7 +396,7 @@ err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 8315e8f603d7..f8113fd23265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
deleted file mode 100644
index e4eeb2ba21c7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_STEERING_H__
-#define __MLX5_IPSEC_STEERING_H__
-
-#include "en.h"
-#include "ipsec.h"
-#include "ipsec_offload.h"
-#include "en/fs.h"
-
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule);
-#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 814f2a56f633..30a70d139046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+ if (!mlx5e_ktls_type_check(mdev, crypto_info))
return -EOPNOTSUPP;
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index d016624fbc9d..948400dee525 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -42,6 +42,8 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
@@ -62,6 +64,8 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
atomic64_t tx_tls_del;
+ atomic64_t tx_tls_pool_alloc;
+ atomic64_t tx_tls_pool_free;
atomic64_t rx_tls_ctx;
atomic64_t rx_tls_del;
};
@@ -69,6 +73,7 @@ struct mlx5e_tls_sw_stats {
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
+ struct mlx5e_tls_tx_pool *tx_pool;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
@@ -83,6 +88,15 @@ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+}
+
static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 2ab46c4247ff..7c1c0eb16787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -41,6 +41,8 @@
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_free) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f239fb2e832f..6b6c7044b64a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -35,30 +35,70 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += 1; /* fence nop */
return stop_room;
}
+static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
+{
+ MLX5_SET(tisc, tisc, tls_en, 1);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
- MLX5_SET(tisc, tisc, tls_en, 1);
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
+}
+
+static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
- return mlx5e_create_tis(mdev, in, tisn);
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
}
struct mlx5e_ktls_offload_context_tx {
- struct tls_offload_context_tx *tx_ctx;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
- struct mlx5e_tls_sw_stats *sw_stats;
+ /* fast path */
u32 expected_seq;
u32 tisn;
- u32 key_id;
bool ctx_post_pending;
+ /* control / resync */
+ struct list_head list_node; /* member of the pool */
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ struct tls_offload_context_tx *tx_ctx;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ u32 key_id;
+ u8 create_err : 1;
};
static void
@@ -82,28 +122,368 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
return *ctx;
}
+/* struct for callback API management */
+struct mlx5e_async_ctx {
+ struct mlx5_async_work context;
+ struct mlx5_async_ctx async_ctx;
+ struct work_struct work;
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct completion complete;
+ int err;
+ union {
+ u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
+ };
+};
+
+static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+{
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ if (!bulk_async)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
+ init_completion(&async->complete);
+ }
+
+ return bulk_async;
+}
+
+static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
+ }
+ kvfree(bulk_async);
+}
+
+static void create_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ if (status) {
+ async->err = status;
+ priv_tx->create_err = 1;
+ goto out;
+ }
+
+ priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
+out:
+ complete(&async->complete);
+}
+
+static void destroy_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ complete(&async->complete);
+ kfree(priv_tx);
+}
+
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
+ struct mlx5e_async_ctx *async)
+{
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ int err;
+
+ priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+ if (!priv_tx)
+ return ERR_PTR(-ENOMEM);
+
+ priv_tx->mdev = mdev;
+ priv_tx->sw_stats = sw_stats;
+
+ if (!async) {
+ err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+ if (err)
+ goto err_out;
+ } else {
+ async->priv_tx = priv_tx;
+ err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ async->out_create, sizeof(async->out_create),
+ create_tis_callback, &async->context);
+ if (err)
+ goto err_out;
+ }
+
+ return priv_tx;
+
+err_out:
+ kfree(priv_tx);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_async_ctx *async)
+{
+ if (priv_tx->create_err) {
+ complete(&async->complete);
+ kfree(priv_tx);
+ return;
+ }
+ async->priv_tx = priv_tx;
+ mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
+ &async->async_ctx,
+ async->out_destroy, sizeof(async->out_destroy),
+ destroy_tis_callback, &async->context);
+}
+
+static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
+ struct list_head *list, int size)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = mlx5e_bulk_async_init(mdev, size);
+ if (!bulk_async)
+ return;
+
+ i = 0;
+ list_for_each_entry(obj, list, list_node) {
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ i++;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ wait_for_completion(&async->complete);
+ }
+ mlx5e_bulk_async_cleanup(bulk_async, size);
+}
+
+/* Recycling pool API */
+
+#define MLX5E_TLS_TX_POOL_BULK (16)
+#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
+#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
+
+struct mlx5e_tls_tx_pool {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ struct mutex lock; /* Protects access to the pool */
+ struct list_head list;
+ size_t size;
+
+ struct workqueue_struct *wq;
+ struct work_struct create_work;
+ struct work_struct destroy_work;
+};
+
+static void create_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, create_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ LIST_HEAD(local_list);
+ int i, j, err = 0;
+
+ bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
+ if (!bulk_async)
+ return;
+
+ for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+ list_add(&obj->list_node, &local_list);
+ }
+
+ for (j = 0; j < i; j++) {
+ struct mlx5e_async_ctx *async = &bulk_async[j];
+
+ wait_for_completion(&async->complete);
+ if (!err && async->err)
+ err = async->err;
+ }
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
+ mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ if (err)
+ goto err_out;
+
+ mutex_lock(&pool->lock);
+ if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ goto err_out;
+ }
+ list_splice(&local_list, &pool->list);
+ pool->size += MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+ return;
+
+err_out:
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static void destroy_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
+ struct mlx5e_tls_sw_stats *sw_stats)
+{
+ struct mlx5e_tls_tx_pool *pool;
+
+ BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
+ if (!pool->wq)
+ goto err_free;
+
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->lock);
+
+ INIT_WORK(&pool->create_work, create_work);
+ INIT_WORK(&pool->destroy_work, destroy_work);
+
+ pool->mdev = mdev;
+ pool->sw_stats = sw_stats;
+
+ return pool;
+
+err_free:
+ kvfree(pool);
+ return NULL;
+}
+
+static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ }
+ if (pool->size) {
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
+ atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
+ }
+}
+
+static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ mlx5e_tls_tx_pool_list_cleanup(pool);
+ destroy_workqueue(pool->wq);
+ kvfree(pool);
+}
+
+static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
+{
+ mutex_lock(&pool->lock);
+ list_add(&obj->list_node, &pool->list);
+ if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, &pool->destroy_work);
+ mutex_unlock(&pool->lock);
+}
+
+static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+
+ mutex_lock(&pool->lock);
+ if (unlikely(pool->size == 0)) {
+ /* pool is empty:
+ * - trigger the populating work, and
+ * - serve the current context via the regular blocking API.
+ */
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
+ if (!IS_ERR(obj))
+ atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
+ return obj;
+ }
+
+ obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
+ list_node);
+ list_del(&obj->list_node);
+ if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ return obj;
+}
+
+/* End of pool API */
+
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
- priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
- if (!priv_tx)
- return -ENOMEM;
+ priv_tx = pool_pop(pool);
+ if (IS_ERR(priv_tx))
+ return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+ err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
if (err)
goto err_create_key;
- priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,36 +491,29 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
- err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
- if (err)
- goto err_create_tis;
-
priv_tx->ctx_post_pending = true;
atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
-err_create_tis:
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
- kfree(priv_tx);
+ pool_push(pool, priv_tx);
return err;
}
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_tx_pool *pool;
struct mlx5e_priv *priv;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5e_destroy_tis(mdev, priv_tx->tisn);
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
- kfree(priv_tx);
+ mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ pool_push(pool, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -201,6 +574,16 @@ post_progress_params(struct mlx5e_txqsq *sq,
sq->pc += num_wqebbs;
}
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ tx_fill_wi(sq, pi, 1, 0, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
@@ -212,6 +595,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
post_static_params(sq, priv_tx, fence_first_post);
post_progress_params(sq, priv_tx, progress_fence);
+ tx_post_fence_nop(sq);
}
struct tx_sync_info {
@@ -304,7 +688,7 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
}
static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_data_seg *dseg;
@@ -326,7 +710,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->tis_tir_num = cpu_to_be32(tisn << 8);
- cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -361,67 +744,39 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats->tls_dump_bytes += wi->num_bytes;
}
-static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
- tx_fill_wi(sq, pi, 1, 0, NULL);
-
- mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
-}
-
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
int datalen,
u32 seq)
{
- struct mlx5e_sq_stats *stats = sq->stats;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- int i = 0;
+ int i;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
- if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
- if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
- stats->tls_skip_no_sync_data++;
- return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
- }
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
+ /* We might get here with ret == FAIL if a retransmission
+ * reaches the driver after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
- stats->tls_drop_no_sync_data++;
- goto err_out;
- }
-
- stats->tls_ooo++;
+ return ret;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags) {
- tx_post_fence_nop(sq);
- return MLX5E_KTLS_SYNC_DONE;
- }
-
- for (; i < info.nr_frags; i++) {
+ for (i = 0; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
orig_fsz = skb_frag_size(f);
do {
- bool fence = !(i || frag_offset);
unsigned int fsz;
n++;
fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
skb_frag_size_set(f, fsz);
- if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
page_ref_add(skb_frag_page(f), n - 1);
goto err_out;
}
@@ -457,7 +812,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
int datalen;
u32 seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return true;
@@ -469,24 +824,27 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+ if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- }
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+ stats->tls_ooo++;
+
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ stats->tls_skip_no_sync_data++;
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
- fallthrough;
+ goto err_out;
case MLX5E_KTLS_SYNC_FAIL:
+ stats->tls_drop_no_sync_data++;
goto err_out;
}
}
@@ -505,3 +863,24 @@ err_out:
dev_kfree_skb_any(skb);
return false;
}
+
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return 0;
+
+ priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
+ if (!priv->tls->tx_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return;
+
+ mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
+ priv->tls->tx_pool = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 49cca6bd49a1..cd7f245dcf14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+ dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
int i;
arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs->wq);
+ destroy_workqueue(priv->fs->arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
}
}
@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
return;
_mlx5e_cleanup_tables(priv);
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+ struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -321,7 +321,7 @@ out:
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
- priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
- if (!priv->fs.arfs)
+ priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
+ if (!priv->fs->arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs.arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs->rules);
- priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs->wq)
+ spin_lock_init(&priv->fs->arfs->arfs_lock);
+ INIT_LIST_HEAD(&priv->fs->arfs->rules);
+ priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs->arfs->wq)
goto err;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
err_des:
_mlx5e_cleanup_tables(priv);
err:
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
return err;
}
@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+ queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6e80585d731f..b811207fe5ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#include <linux/ethtool_netlink.h>
+
#include "en.h"
#include "en/port.h"
#include "en/params.h"
@@ -305,12 +307,18 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
+
+ kernel_param->tcp_data_split =
+ (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -320,7 +328,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
@@ -451,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
__func__);
@@ -2067,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
* the numeration of the QoS SQs will change, while per-queue qdiscs are
* attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
__func__);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d2f0773f95c6..e2a9b9be5c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,13 +37,13 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
-#include "en_rep.h"
+#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai);
enum {
@@ -132,9 +132,8 @@ struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
return vlan->ft.t;
}
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
- struct net_device *ndev = priv->netdev;
int max_list_size;
int list_size;
u16 *vlans;
@@ -143,15 +142,15 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
list_size++;
- max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+ max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- netdev_warn(ndev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ mlx5_core_warn(fs->mdev,
+ "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
@@ -160,16 +159,16 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
}
- err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+ err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
+ err);
kvfree(vlans);
return err;
@@ -183,18 +182,18 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
-static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.l2.ft.t;
+ dest.ft = fs->l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -204,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged of both)
*/
- rule_p = &priv->fs.vlan->untagged_rule;
+ rule_p = &fs->vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan->any_cvlan_rule;
+ rule_p = &fs->vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan->any_svlan_rule;
+ rule_p = &fs->vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+ rule_p = &fs->vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -231,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+ rule_p = &fs->vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -250,13 +249,13 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
}
return err;
}
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
struct mlx5_flow_spec *spec;
@@ -267,68 +266,68 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
return -ENOMEM;
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
+ err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);
kvfree(spec);
return err;
}
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
- enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan->untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
- priv->fs.vlan->untagged_rule = NULL;
+ if (fs->vlan->untagged_rule) {
+ mlx5_del_flow_rules(fs->vlan->untagged_rule);
+ fs->vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan->any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
- priv->fs.vlan->any_cvlan_rule = NULL;
+ if (fs->vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
+ fs->vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan->any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
- priv->fs.vlan->any_svlan_rule = NULL;
+ if (fs->vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
+ fs->vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan->active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
- priv->fs.vlan->active_svlans_rule[vid] = NULL;
+ if (fs->vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
+ fs->vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan->active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
- priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+ if (fs->vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
+ fs->vlan->active_cvlans_rule[vid] = NULL;
}
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
break;
}
}
-static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
int err;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
if (err)
return err;
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static struct mlx5_flow_handle *
@@ -354,101 +353,101 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan->trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->vlan->trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.vlan->trap_rule = rule;
+ priv->fs->vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
- priv->fs.vlan->trap_rule = NULL;
+ if (priv->fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
+ priv->fs->vlan->trap_rule = NULL;
}
}
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.l2.trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->l2.trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.l2.trap_rule = rule;
+ priv->fs->l2.trap_rule = rule;
return 0;
}
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs.l2.trap_rule);
- priv->fs.l2.trap_rule = NULL;
+ if (priv->fs->l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs->l2.trap_rule);
+ priv->fs->l2.trap_rule = NULL;
}
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan->cvlan_filter_disabled)
+ if (!priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = false;
+ priv->fs->vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->cvlan_filter_disabled)
+ if (priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = true;
+ priv->fs->vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan->active_cvlans);
+ set_bit(vid, fs->vlan->active_cvlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan->active_cvlans);
+ clear_bit(vid, fs->vlan->active_cvlans);
return err;
}
-static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev, u16 vid)
{
- struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan->active_svlans);
+ set_bit(vid, fs->vlan->active_svlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
+ clear_bit(vid, fs->vlan->active_svlans);
return err;
}
@@ -457,86 +456,91 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
return err;
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q)
- return mlx5e_vlan_rx_add_cvid(priv, vid);
+ return mlx5e_vlan_rx_add_cvid(fs, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
- return mlx5e_vlan_rx_add_svid(priv, vid);
+ return mlx5e_vlan_rx_add_svid(fs, netdev, vid);
return -EOPNOTSUPP;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan->active_cvlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ clear_bit(vid, fs->vlan->active_cvlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
- netdev_update_features(dev);
+ clear_bit(vid, fs->vlan->active_svlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(netdev);
}
return 0;
}
-static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_add_any_vid_rules(fs);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
int i;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ WARN_ON_ONCE(priv->fs->state_destroy);
mlx5e_remove_vlan_trap(priv);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
+ if (priv->fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(priv->fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
@@ -547,9 +551,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
switch (action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
if (!is_multicast_ether_addr(mac_addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
+ l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
@@ -557,52 +561,50 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
case MLX5E_ACTION_DEL:
if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
- mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
+ mlx5e_del_l2_flow_rule(fs, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
}
if (l2_err)
- netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(netdev);
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
- priv->netdev->dev_addr);
-
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
+ struct net_device *ndev,
u8 addr_array[][ETH_ALEN], int size)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct net_device *ndev = priv->netdev;
struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->fs.l2.broadcast_enabled)
+ else if (fs->l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -614,7 +616,8 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
}
}
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
@@ -627,19 +630,19 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
if (size > max_size) {
- netdev_warn(priv->netdev,
- "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ mlx5_core_warn(fs->mdev,
+ "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -649,65 +652,67 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
err = -ENOMEM;
goto out;
}
- mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+ mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
}
- err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+ err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- netdev_err(priv->netdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ mlx5_core_err(fs->mdev,
+ "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_l2_table *ea = &priv->fs.l2;
+ struct mlx5e_l2_table *ea = &fs->l2;
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
- mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
+ mlx5_modify_nic_vport_promisc(fs->mdev, 0,
ea->allmulti_enabled,
ea->promisc_enabled);
}
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
+ mlx5e_execute_l2_action(fs, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
+ mlx5e_execute_l2_action(fs, hn);
}
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_sync_netdev_addr(priv);
+ if (fs->state_destroy)
+ mlx5e_sync_netdev_addr(fs, netdev);
- mlx5e_apply_netdev_addr(priv);
+ mlx5e_apply_netdev_addr(fs);
}
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
-static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
- struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_table *ft = fs->promisc.ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -718,22 +723,22 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
- rule_p = &priv->fs.promisc.rule;
+ rule_p = &fs->promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
}
-static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5e_flow_table *ft = &fs->promisc.ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -742,14 +747,14 @@ static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
return err;
}
- err = mlx5e_add_promisc_rule(priv);
+ err = mlx5e_add_promisc_rule(fs);
if (err)
goto err_destroy_promisc_table;
@@ -762,34 +767,31 @@ err_destroy_promisc_table:
return err;
}
-static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+ if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
return;
- mlx5_del_flow_rules(priv->fs.promisc.rule);
- priv->fs.promisc.rule = NULL;
+ mlx5_del_flow_rules(fs->promisc.rule);
+ fs->promisc.rule = NULL;
}
-static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
return;
- mlx5e_del_promisc_rule(priv);
- mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
- priv->fs.promisc.ft.t = NULL;
+ mlx5e_del_promisc_rule(fs);
+ mlx5_destroy_flow_table(fs->promisc.ft.t);
+ fs->promisc.ft.t = NULL;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
+ struct mlx5e_l2_table *ea = &fs->l2;
- struct mlx5e_l2_table *ea = &priv->fs.l2;
- struct net_device *ndev = priv->netdev;
-
- bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
- bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
- bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool rx_mode_enable = fs->state_destroy;
+ bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
@@ -801,32 +803,32 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
int err;
if (enable_promisc) {
- err = mlx5e_create_promisc_table(priv);
+ err = mlx5e_create_promisc_table(fs);
if (err)
enable_promisc = false;
- if (!priv->channels.params.vlan_strip_disable && !err)
- netdev_warn_once(ndev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ if (!fs->vlan_strip_disable && !err)
+ mlx5_core_warn_once(fs->mdev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
- mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);
- mlx5e_handle_netdev_addr(priv);
+ mlx5e_handle_netdev_addr(fs, netdev);
if (disable_broadcast)
- mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
if (disable_promisc)
- mlx5e_destroy_promisc_table(priv);
+ mlx5e_destroy_promisc_table(fs);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
- mlx5e_vport_context_update(priv);
+ mlx5e_vport_context_update(fs, netdev);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
@@ -841,9 +843,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
- ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
@@ -861,7 +863,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -884,7 +886,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -898,18 +900,18 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
+ mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
}
}
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
@@ -918,10 +920,10 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
}
}
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
@@ -939,7 +941,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
switch (type) {
case MLX5E_FULLMATCH:
@@ -957,8 +959,8 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1044,12 +1046,12 @@ err_destroy_groups:
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+ mlx5e_destroy_flow_table(&priv->fs->l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_l2_table *l2_table = &priv->fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1060,7 +1062,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1180,20 +1182,20 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
return err;
}
-static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
+static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft;
int err;
- ft = &priv->fs.vlan->ft;
+ ft = &fs->vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t))
return PTR_ERR(ft->t);
@@ -1207,7 +1209,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- mlx5e_add_vlan_rules(priv);
+ mlx5e_fs_add_vlan_rules(fs);
return 0;
@@ -1222,33 +1224,33 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+ mlx5_destroy_ttc_table(priv->fs->inner_ttc);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return 0;
mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs.inner_ttc))
- return PTR_ERR(priv->fs.inner_ttc);
+ priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
+ &ttc_params);
+ if (IS_ERR(priv->fs->inner_ttc))
+ return PTR_ERR(priv->fs->inner_ttc);
return 0;
}
@@ -1257,9 +1259,9 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc))
- return PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc))
+ return PTR_ERR(priv->fs->ttc);
return 0;
}
@@ -1267,45 +1269,44 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
+ err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_inner_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev,
- "Failed to create inner ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev,
+ "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
err = mlx5e_create_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
+ err);
goto err_destroy_inner_ttc_table;
}
err = mlx5e_create_l2_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
+ err);
goto err_destroy_ttc_table;
}
- err = mlx5e_create_vlan_table(priv);
+ err = mlx5e_fs_create_vlan_table(priv->fs);
if (err) {
- netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
+ err);
goto err_destroy_l2_table;
}
@@ -1342,16 +1343,69 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
}
-int mlx5e_fs_init(struct mlx5e_priv *priv)
+static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
+ if (!fs->vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
+ kvfree(fs->vlan);
+}
+
+static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->tc = mlx5e_tc_table_alloc();
+ if (IS_ERR(fs->tc))
return -ENOMEM;
return 0;
}
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_tc_table_free(fs->tc);
+}
+
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy)
+{
+ struct mlx5e_flow_steering *fs;
+ int err;
+
+ fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ goto err;
+
+ fs->mdev = mdev;
+ fs->state_destroy = state_destroy;
+ if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+ err = mlx5e_fs_vlan_alloc(fs);
+ if (err)
+ goto err_free_fs;
+ }
+
+ if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+ err = mlx5e_fs_tc_alloc(fs);
+ if (err)
+ goto err_free_vlan;
+ }
+
+ return fs;
+err_free_vlan:
+ mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
+err:
+ return NULL;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
+ mlx5e_fs_tc_free(fs);
+ mlx5e_fs_vlan_free(fs);
+ kvfree(fs);
}
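
The mlx5e_fs_init() constructor above unwinds through goto labels in reverse allocation order, so each failure path releases only what has already been allocated. A minimal, self-contained userspace sketch of that idiom follows; fs_ctx_create() and its fields are illustrative stand-ins, not driver symbols.

#include <stdlib.h>

struct fs_ctx {
	void *vlan;
	void *tc;
};

/* Allocate the context and its optional sub-objects; on any failure,
 * jump to the label that releases only what was already allocated. */
static struct fs_ctx *fs_ctx_create(void)
{
	struct fs_ctx *fs = calloc(1, sizeof(*fs));

	if (!fs)
		goto err;

	fs->vlan = malloc(64);		/* stand-in for a vlan sub-allocation */
	if (!fs->vlan)
		goto err_free_fs;

	fs->tc = malloc(64);		/* stand-in for a tc sub-allocation */
	if (!fs->tc)
		goto err_free_vlan;

	return fs;

err_free_vlan:				/* deepest allocation is released first */
	free(fs->vlan);
err_free_fs:
	free(fs);
err:
	return NULL;
}

int main(void)
{
	struct fs_ctx *fs = fs_ctx_create();

	if (fs) {
		free(fs->tc);
		free(fs->vlan);
		free(fs);
	}
	return 0;
}
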
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index ad0d234632a3..3e4bc7836ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ eth_ft = &priv->fs->ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs.ethtool.rules;
+ struct list_head *head = &priv->fs->ethtool.rules;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs.ethtool.tot_num_rules++;
+ priv->fs->ethtool.tot_num_rules++;
list_add(&rule->list, head);
}
@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs.ethtool.tot_num_rules--;
+ priv->fs->ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -742,10 +742,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft;
- if (!eth_ft->ft) {
- err = -EINVAL;
- goto del_ethtool_rule;
- }
+
rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -791,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -834,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
- INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+ INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -966,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ info->rule_cnt = priv->fs->ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 087952b84ccb..d858667736a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,7 +31,6 @@
*/
#include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
@@ -64,6 +63,7 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
@@ -1912,8 +1912,7 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
{
int tc;
- if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
- !params->mqprio.channel.rl) {
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
*hw_id = 0;
return 0;
}
@@ -1922,7 +1921,14 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
if (tc < 0)
return tc;
- return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+ if (tc >= params->mqprio.num_tc) {
+ WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
+ tc, params->mqprio.num_tc);
+ return -EINVAL;
+ }
+
+ *hw_id = params->mqprio.channel.hw_id[tc];
+ return 0;
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
@@ -2383,9 +2389,11 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- err = mlx5e_qos_open_queues(priv, chs);
- if (err)
- goto err_close_ptp;
+ if (priv->htb) {
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+ }
mlx5e_health_channels_update(priv);
kvfree(cparam);
@@ -2567,9 +2575,11 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
- int qos_queues, nch, ntc, num_txqs, err;
+ int nch, ntc, num_txqs, err;
+ int qos_queues = 0;
- qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+ if (priv->htb)
+ qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
nch = priv->channels.params.num_channels;
ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
@@ -2615,13 +2625,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
- if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
- priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
- }
return 0;
@@ -2724,7 +2727,8 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
- mlx5e_qos_activate_queues(priv);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
@@ -2841,7 +2845,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
new_chs.params = *params;
- mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
err = mlx5e_open_channels(priv, &new_chs);
if (err)
@@ -2897,7 +2901,7 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+ mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
set_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -3135,6 +3139,12 @@ err_close_tises:
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+ mlx5e_accel_cleanup_tx(priv);
mlx5e_destroy_tises(priv);
}
@@ -3203,19 +3213,38 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
- params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
+static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
+ struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ u32 hw_id = 0;
+
+ if (rl)
+ mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
+ params->mqprio.channel.hw_id[tc] = hw_id;
+ }
+}
+
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt,
+ struct tc_mqprio_qopt_offload *mqprio,
struct mlx5e_mqprio_rl *rl)
{
+ int tc;
+
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
- params->mqprio.num_tc = qopt->num_tc;
- params->mqprio.channel.rl = rl;
- mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+ params->mqprio.num_tc = mqprio->qopt.num_tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
+
+ mlx5e_mqprio_rl_update_params(params, rl);
+ mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
}
static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
@@ -3241,6 +3270,12 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
+ if (!err && priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
mlx5e_get_dcb_num_tc(&priv->channels.params));
return err;
@@ -3299,16 +3334,38 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
-static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
{
int tc;
- for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
- if (mqprio->max_rate[tc])
+ for (tc = 0; tc < num_tc; tc++)
+ if (max_rate[tc])
return true;
return false;
}
+static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
+ u8 num_tc, u64 max_rate[])
+{
+ struct mlx5e_mqprio_rl *rl;
+ int err;
+
+ if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
+ return NULL;
+
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return ERR_PTR(err);
+ }
+
+ return rl;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -3322,32 +3379,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
- rl = NULL;
- if (mlx5e_mqprio_rate_limit(mqprio)) {
- rl = mlx5e_mqprio_rl_alloc();
- if (!rl)
- return -ENOMEM;
- err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
- mqprio->max_rate);
- if (err) {
- mlx5e_mqprio_rl_free(rl);
- return err;
- }
- }
+ rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
+ if (IS_ERR(rl))
+ return PTR_ERR(rl);
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+ mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
- if (err && rl) {
- mlx5e_mqprio_rl_cleanup(rl);
- mlx5e_mqprio_rl_free(rl);
+ if (err) {
+ if (rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+ return err;
}
- return err;
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = rl;
+
+ return 0;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3356,7 +3413,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(priv->htb.maj_id))
+ if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
return -EINVAL;
switch (mqprio->mode) {
@@ -3369,47 +3426,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
}
}
-static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
-{
- int res;
-
- switch (htb->command) {
- case TC_HTB_CREATE:
- return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
- htb->extack);
- case TC_HTB_DESTROY:
- return mlx5e_htb_root_del(priv);
- case TC_HTB_LEAF_ALLOC_QUEUE:
- res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
- htb->rate, htb->ceil, htb->extack);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- case TC_HTB_LEAF_TO_INNER:
- return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
- htb->rate, htb->ceil, htb->extack);
- case TC_HTB_LEAF_DEL:
- return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
- case TC_HTB_LEAF_DEL_LAST:
- case TC_HTB_LEAF_DEL_LAST_FORCE:
- return mlx5e_htb_leaf_del_last(priv, htb->classid,
- htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
- htb->extack);
- case TC_HTB_NODE_MODIFY:
- return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
- htb->extack);
- case TC_HTB_LEAF_QUERY_QUEUE:
- res = mlx5e_get_txq_by_classid(priv, htb->classid);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3443,7 +3459,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
return err;
case TC_SETUP_QDISC_HTB:
mutex_lock(&priv->state_lock);
- err = mlx5e_setup_tc_htb(priv, type_data);
+ err = mlx5e_htb_setup_tc(priv, type_data);
mutex_unlock(&priv->state_lock);
return err;
default:
@@ -3594,20 +3610,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
- if (enable && priv->xsk.refcnt) {
- netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
- priv->xsk.refcnt);
- err = -EINVAL;
- goto out;
- }
-
cur_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- netdev_warn(netdev, "can't set LRO with legacy RQ\n");
- err = -EINVAL;
- goto out;
- }
-
new_params = *cur_params;
if (enable)
@@ -3676,6 +3679,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
@@ -3685,12 +3689,14 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
}
#endif
- if (!enable && priv->htb.maj_id) {
+ mutex_lock(&priv->state_lock);
+ if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
- return -EINVAL;
+ err = -EINVAL;
}
+ mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
static int set_feature_rx_all(struct net_device *netdev, bool enable)
@@ -3772,20 +3778,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
+ priv->fs->vlan_strip_disable = !enable;
priv->channels.params.vlan_strip_disable = !enable;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
- if (err)
+ if (err) {
+ priv->fs->vlan_strip_disable = enable;
priv->channels.params.vlan_strip_disable = enable;
-
+ }
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
+}
+
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -3883,8 +3914,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs.vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+ if (!priv->fs->vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -3916,6 +3947,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
if (priv->xsk.refcnt) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_LRO;
+ }
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
@@ -5002,6 +5038,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5009,11 +5046,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
- err = mlx5e_fs_init(priv);
- if (err) {
+ fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!fs) {
+ err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
+ priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
@@ -5032,7 +5072,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_fs_cleanup(priv);
+ mlx5e_fs_cleanup(priv->fs);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -5110,6 +5150,23 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
}
+static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
+{
+ struct mlx5e_params *params;
+ struct mlx5e_mqprio_rl *rl;
+
+ params = &priv->channels.params;
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+ return;
+
+ rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
+ params->mqprio.channel.max_rate);
+ if (IS_ERR(rl))
+ rl = NULL;
+ priv->mqprio_rl = rl;
+ mlx5e_mqprio_rl_update_params(params, rl);
+}
+
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
@@ -5120,8 +5177,17 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_accel_init_tx(priv);
+ if (err)
+ goto err_destroy_tises;
+
+ mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+ return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5129,7 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, netdev);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
@@ -5214,7 +5280,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
- BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
@@ -5264,6 +5332,14 @@ int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
@@ -5293,7 +5369,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (err)
goto err_free_cpumask;
- hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5350,14 +5425,9 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
- for (i = 0; i < priv->htb.max_qos_sqs; i++)
- kfree(priv->htb.qos_sq_stats[i]);
- kvfree(priv->htb.qos_sq_stats);
-
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
+ for (i = 0; i < priv->htb_max_qos_sqs; i++)
+ kfree(priv->htb_qos_sq_stats[i]);
+ kvfree(priv->htb_qos_sq_stats);
memset(priv, 0, sizeof(*priv));
}
@@ -5447,6 +5517,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5506,6 +5578,8 @@ err_cleanup_tx:
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5515,6 +5589,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (profile->disable)
profile->disable(priv);
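
mlx5e_mqprio_rl_create() above uses a three-way return: NULL when no per-TC max_rate was requested, ERR_PTR() on failure, and a valid handle otherwise, which is why callers check IS_ERR() but accept NULL. A small userspace sketch of that convention follows, with simplified stand-ins for the kernel's <linux/err.h> helpers; rl_create() is illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR();
 * the real macros live in <linux/err.h>. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct rl { unsigned long rate; };

/* Returns NULL when no rate was requested, ERR_PTR(-ENOMEM) on allocation
 * failure, and a valid object otherwise. */
static struct rl *rl_create(unsigned long rate)
{
	struct rl *rl;

	if (!rate)
		return NULL;

	rl = malloc(sizeof(*rl));
	if (!rl)
		return ERR_PTR(-ENOMEM);

	rl->rate = rate;
	return rl;
}

int main(void)
{
	struct rl *rl = rl_create(100);

	if (IS_ERR(rl))
		return (int)-PTR_ERR(rl);	/* hard failure */
	if (!rl)
		printf("no rate limiting requested\n");
	else
		printf("rate limiter at %lu\n", rl->rate);

	free(rl);	/* free(NULL) is a no-op */
	return 0;
}
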
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f797fd97d305..4c1599de652c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -229,7 +229,7 @@ mlx5e_rep_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int
@@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+ mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
@@ -728,8 +729,8 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
mlx5e_set_ttc_params(priv, &ttc_params, false);
@@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc)) {
- err = PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc)) {
+ err = PTR_ERR(priv->fs->ttc);
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
return 0;
}
@@ -835,11 +836,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
@@ -873,13 +883,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -888,7 +900,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
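The reworked mlx5e_init_rep_rx() above follows the usual reverse-order unwind: each label releases only what was set up before the failing step, which is why the new mlx5e_fs_init() gets its own err_free_fs label at the bottom of the chain and is otherwise released by mlx5e_cleanup_rep(). A generic sketch of the idiom with made-up names (needs <linux/slab.h>):

struct example_ctx {
	void *a;
	void *b;
};

static int example_init(struct example_ctx *ctx)
{
	int err;

	ctx->a = kzalloc(64, GFP_KERNEL);	/* step 1 */
	if (!ctx->a)
		return -ENOMEM;

	ctx->b = kzalloc(64, GFP_KERNEL);	/* step 2 */
	if (!ctx->b) {
		err = -ENOMEM;
		goto err_free_a;		/* undo step 1 only */
	}
	return 0;

err_free_a:
	kfree(ctx->a);
	ctx->a = NULL;
	return err;
}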
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index adf5cc6a7b8c..dec183ccd4ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -62,6 +62,7 @@ struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
+struct mlx5e_flow_meters;
struct mlx5_rep_uplink_priv {
/* indirect block callbacks are invoked on bind/unbind events
@@ -97,6 +98,8 @@ struct mlx5_rep_uplink_priv {
/* OVS internal port support */
struct mlx5e_tc_int_port_priv *int_port_priv;
+
+ struct mlx5e_flow_meters *flow_meters;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e87bb2b7541..7409829d1201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
int i;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (i = 0; i < max_qos_sqs; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2184,13 +2186,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
int i, qid;
for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2210,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
int i, qid;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (qid = 0; qid < max_qos_sqs; qid++) {
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
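The renamed htb_max_qos_sqs/htb_qos_sq_stats fields keep the publication scheme the comments refer to: the count is the release point for the per-queue stats entries, so a reader that observes a given count is guaranteed to also observe every entry below it. A sketch of the writer side (illustrative only; the real publish happens in mlx5e_open_qos_sq(), which is outside this excerpt):

static void example_publish_qos_sq_stats(struct mlx5e_priv *priv, u16 qid,
					 struct mlx5e_sq_stats *sq_stats)
{
	/* entry first ... */
	WRITE_ONCE(priv->htb_qos_sq_stats[qid], sq_stats);
	/* ... then the count; pairs with the smp_load_acquire() calls above */
	if (qid + 1 > READ_ONCE(priv->htb_max_qos_sqs))
		smp_store_release(&priv->htb_max_qos_sqs, qid + 1);
}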
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e48b15b55b6f..ed4fc940e4ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
+ u64 resync_cqe;
+ u64 resync_event;
};
struct mlx5e_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ca2c8763237..f154bda668ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -59,6 +59,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
+#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,6 +71,30 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_tc_table {
+ /* Protects the dynamic assignment of the t parameter
+ * which is the nic tc root table.
+ */
+ struct mutex t_lock;
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_post_act *post_act;
+
+ struct rhashtable ht;
+
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
+};
+
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -104,8 +129,27 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
+ [PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
+{
+ struct mlx5e_tc_table *tc;
+
+ tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
+ return tc ? tc : ERR_PTR(-ENOMEM);
+}
+
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
+{
+ kvfree(tc);
+}
+
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+{
+ return tc->chains;
+}
+
/* To avoid false lock dependency warning set the tc_ht lock
* class different than the lock class of the ht being used when deleting
* last flow from a group and then deleting a group, we get into del_sw_flow_group()
@@ -240,6 +284,30 @@ mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
return NULL;
}
+struct mlx5e_flow_meters *
+mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_priv *priv;
+
+ if (is_mdev_switchdev_mode(dev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ priv = netdev_priv(uplink_rpriv->netdev);
+ if (!uplink_priv->flow_meters)
+ uplink_priv->flow_meters =
+ mlx5e_flow_meters_init(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ uplink_priv->post_act);
+ if (!IS_ERR(uplink_priv->flow_meters))
+ return uplink_priv->flow_meters;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -254,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs.tc.ct;
+ return priv->fs->tc->ct;
}
static struct mlx5e_tc_psample *
@@ -288,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
- return priv->fs.tc.post_act;
+ return priv->fs->tc->post_act;
}
struct mlx5_flow_handle *
@@ -319,12 +387,62 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+static bool
+is_flow_meter_action(struct mlx5_flow_attr *attr)
+{
+ return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+}
+
+static int
+mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+ struct mlx5e_post_meter_priv *post_meter;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
+ if (IS_ERR(meter)) {
+ mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
+ return PTR_ERR(meter);
+ }
+
+ ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
+ meter->red_counter);
+ if (IS_ERR(post_meter)) {
+ mlx5_core_err(priv->mdev, "Failed to init post meter\n");
+ goto err_meter_init;
+ }
+
+ attr->meter_attr.meter = meter;
+ attr->meter_attr.post_meter = post_meter;
+ attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+
+err_meter_init:
+ mlx5e_tc_meter_put(meter);
+ return PTR_ERR(post_meter);
+}
+
+static void
+mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+{
+ mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_tc_meter_put(attr->meter_attr.meter);
+}
+
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
if (attr->flags & MLX5_ATTR_FLAG_CT) {
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
@@ -341,6 +459,12 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+ if (is_flow_meter_action(attr)) {
+ err = mlx5e_tc_add_flow_meter(priv, attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -367,6 +491,9 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
}
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ if (attr->meter_attr.meter)
+ mlx5e_tc_del_flow_meter(attr);
}
int
@@ -484,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs.tc.mod_hdr;
+ &priv->fs->tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -702,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+ mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
return 0;
@@ -792,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -807,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -894,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -909,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -921,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -999,10 +1126,10 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
@@ -1011,6 +1138,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int dest_ix = 0;
+ nic_chains = mlx5e_nic_chains(tc);
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
@@ -1035,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
}
dest_ix++;
}
@@ -1063,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ rule = ERR_CAST(priv->fs->tc->t);
goto err_ft_get;
}
}
@@ -1165,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
mlx5_del_flow_rules(rule);
@@ -1182,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
@@ -1194,13 +1322,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs.tc.t_lock);
+ mutex_lock(&priv->fs->tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs.tc.t = NULL;
+ mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
+ priv->fs->tc->t = NULL;
}
- mutex_unlock(&priv->fs.tc.t_lock);
+ mutex_unlock(&priv->fs->tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -3936,7 +4064,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs.tc.ht;
+ return &priv->fs->tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4519,9 +4647,9 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
-static int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4655,11 +4783,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+ hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4674,7 +4802,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5e_flow_steering *fs;
struct mlx5e_priv *peer_priv;
struct mlx5e_tc_table *tc;
struct mlx5e_priv *priv;
@@ -4685,8 +4812,7 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
- fs = container_of(tc, struct mlx5e_flow_steering, tc);
- priv = container_of(fs, struct mlx5e_priv, fs);
+ priv = tc->priv;
peer_priv = netdev_priv(ndev);
if (priv == peer_priv ||
!(priv->netdev->features & NETIF_F_HW_TC))
@@ -4715,7 +4841,7 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
- struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+ struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
@@ -4737,12 +4863,12 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+ mlx5_destroy_flow_table(priv->fs->tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4753,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
mutex_init(&tc->t_lock);
mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
+ tc->priv = priv;
err = rhashtable_init(&tc->ht, &tc_ht_params);
if (err)
@@ -4782,7 +4909,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs.tc.miss_t;
+ attr.default_ft = priv->fs->tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
@@ -4792,7 +4919,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
- tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
@@ -4831,7 +4958,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -4955,6 +5082,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
@@ -5035,7 +5163,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
@@ -5060,7 +5188,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
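is_flow_meter_action() keys on two attr fields, so a rule only takes the new meter branch in mlx5e_tc_rule_offload() if its action parser requested it. A sketch of what that request looks like (illustrative; the tc police parsing that fills meter_attr.params is outside this excerpt):

static void example_request_flow_meter(struct mlx5_flow_attr *attr)
{
	/* attr->meter_attr.params is assumed to have been filled from the
	 * parsed tc police action; its layout is not shown in this diff.
	 */
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
	attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
}

mlx5e_tc_add_flow_meter() then resolves the meter, builds the post-meter table and points attr->dest_ft at it; mlx5e_tc_rule_unoffload() releases both via mlx5e_tc_del_flow_meter() when attr->meter_attr.meter is set.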
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index e2a1250aeca1..6ce1ab6b86b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -39,6 +39,7 @@
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
+#include "en/tc/meter.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -71,6 +72,7 @@ struct mlx5_flow_attr {
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
struct mlx5e_sample_attr sample_attr;
+ struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -83,6 +85,7 @@ struct mlx5_flow_attr {
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
+ u32 exe_aso_type;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
struct {
@@ -229,6 +232,7 @@ enum mlx5e_tc_attr_to_reg {
FTEID_TO_REG,
NIC_CHAIN_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
+ PACKET_COLOR_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -241,6 +245,10 @@ struct mlx5e_tc_attr_to_reg_mapping {
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
+#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
+#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
@@ -348,6 +356,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
#endif
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -368,6 +378,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
+static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
+static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
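The new MLX5_REG_MAPPING_* macros centralize each mapped field's offset and width, which is what mlx5e_tc_update_skb() above now uses for NIC_ZONE_RESTORE_TO_REG. A sketch of the generic extraction they enable (illustrative helper, not part of the patch):

static u32 example_get_mapped_field(u32 reg_b, enum mlx5e_tc_attr_to_reg id)
{
	/* e.g. for a mapping with moffset = 0 and mlen = 16, the mask is
	 * GENMASK(15, 0) == 0xffff
	 */
	return (reg_b >> MLX5_REG_MAPPING_MOFFSET(id)) & MLX5_REG_MAPPING_MASK(id);
}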
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 9a7250be229f..27f791feb517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,14 +152,14 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ihs = skb_tcp_all_headers(skb);
if (ipv6_has_hopopt_jumbo(skb)) {
*hopbyhop = sizeof(struct hop_jumbo_hdr);
ihs -= sizeof(struct hop_jumbo_hdr);
@@ -631,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+ ptpsq->ts_cqe_ctr_mask);
+}
+
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+ if (unlikely(sq->ptpsq))
+ mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
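The substitution in mlx5e_tx_get_gso_ihs() above is mechanical: assuming the usual definitions of the new helpers, they compute exactly the sums they replace, i.e. transport header offset plus TCP header length for the outer and inner headers respectively:

/* sketch of the assumed equivalence, not the helpers' actual definitions */
static inline int example_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);		/* skb_tcp_all_headers() */
}

static inline int example_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);	/* skb_inner_tcp_all_headers() */
}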
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 05e08cec5a8c..4fbff7bcc155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
+#include <linux/build_bug.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <net/netevent.h>
@@ -12,26 +13,57 @@
#define CREATE_TRACE_POINTS
#include "diag/bridge_tracepoint.h"
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
-
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
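The regrouped index macros above still cover exactly the original table sizes, which is what the two static_assert() lines pin down:

	ingress: 12000 (vlan) + 12000 (vlan filter) + 12000 (qinq)
	         + 12000 (qinq filter) + 16000 (untagged mac) = 64000
	egress:  16000 (vlan) + 16000 (qinq) + (32000 - 1) (mac) + 1 (miss) = 64000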
@@ -63,12 +95,14 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_qinq_fg;
struct mlx5_flow_group *egress_mac_fg;
struct mlx5_flow_group *egress_miss_fg;
struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
+ u16 vlan_proto;
};
static void
@@ -138,7 +172,9 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -154,30 +190,53 @@ mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flo
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
+ vlan_proto, PTR_ERR(fg));
return fg;
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
+ u16 vlan_proto, struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -193,27 +252,48 @@ mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
-
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
"Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
PTR_ERR(fg));
-
kvfree(in);
return fg;
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -250,7 +330,9 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -265,13 +347,14 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
@@ -283,6 +366,25 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -346,7 +448,7 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
+ struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
struct mlx5_eswitch *esw = br_offloads->esw;
int err;
@@ -374,10 +476,22 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
- if (IS_ERR(filter_fg)) {
- err = PTR_ERR(filter_fg);
- goto err_filter_fg;
+ vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(vlan_filter_fg)) {
+ err = PTR_ERR(vlan_filter_fg);
+ goto err_vlan_filter_fg;
+ }
+
+ qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
+ qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_filter_fg)) {
+ err = PTR_ERR(qinq_filter_fg);
+ goto err_qinq_filter_fg;
}
mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
@@ -389,13 +503,19 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
br_offloads->ingress_ft = ingress_ft;
br_offloads->skip_ft = skip_ft;
br_offloads->ingress_vlan_fg = vlan_fg;
- br_offloads->ingress_filter_fg = filter_fg;
+ br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
+ br_offloads->ingress_qinq_fg = qinq_fg;
+ br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
br_offloads->ingress_mac_fg = mac_fg;
return 0;
err_mac_fg:
- mlx5_destroy_flow_group(filter_fg);
-err_filter_fg:
+ mlx5_destroy_flow_group(qinq_filter_fg);
+err_qinq_filter_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
+ mlx5_destroy_flow_group(vlan_filter_fg);
+err_vlan_filter_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(skip_ft);
@@ -409,8 +529,12 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
{
mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
br_offloads->ingress_mac_fg = NULL;
- mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
- br_offloads->ingress_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
+ br_offloads->ingress_qinq_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
+ br_offloads->ingress_qinq_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
+ br_offloads->ingress_vlan_filter_fg = NULL;
mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
br_offloads->ingress_vlan_fg = NULL;
mlx5_destroy_flow_table(br_offloads->skip_ft);
@@ -428,7 +552,7 @@ static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
struct mlx5_flow_handle *miss_handle = NULL;
struct mlx5_eswitch *esw = br_offloads->esw;
@@ -447,6 +571,12 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
goto err_vlan_fg;
}
+ qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
@@ -491,6 +621,7 @@ skip_miss_flow:
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
+ bridge->egress_qinq_fg = qinq_fg;
bridge->egress_mac_fg = mac_fg;
bridge->egress_miss_fg = miss_fg;
bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
@@ -498,6 +629,8 @@ skip_miss_flow:
return 0;
err_mac_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(egress_ft);
@@ -515,6 +648,7 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
if (bridge->egress_miss_fg)
mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
+ mlx5_destroy_flow_group(bridge->egress_qinq_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
}
@@ -559,10 +693,17 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
flow_act.pkt_reformat = vlan->pkt_reformat_push;
flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -645,10 +786,17 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a
MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
@@ -696,10 +844,17 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
flow_act.pkt_reformat = vlan->pkt_reformat_pop;
}
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -774,6 +929,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
bridge->ifindex = ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
+ bridge->vlan_proto = ETH_P_8021Q;
list_add(&bridge->list, &br_offloads->bridges);
return bridge;
@@ -911,12 +1067,13 @@ mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
}
static int
-mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
- } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
+ } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
@@ -1008,36 +1165,58 @@ mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct
vlan->pkt_mod_hdr_push_mark = NULL;
}
-static struct mlx5_esw_bridge_vlan *
-mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
- struct mlx5_eswitch *esw)
+static int
+mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
- struct mlx5_esw_bridge_vlan *vlan;
int err;
- vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return ERR_PTR(-ENOMEM);
-
- vlan->vid = vid;
- vlan->flags = flags;
- INIT_LIST_HEAD(&vlan->fdb_list);
-
if (flags & BRIDGE_VLAN_INFO_PVID) {
- err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
+ err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
if (err)
- goto err_vlan_push;
+ return err;
err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
if (err)
goto err_vlan_push_mark;
}
+
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
if (err)
goto err_vlan_pop;
}
+ return 0;
+
+err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
+ return err;
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ int err;
+
+ vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ INIT_LIST_HEAD(&vlan->fdb_list);
+
+ err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
+ if (err)
+ goto err_vlan_push_pop;
+
err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
if (err)
goto err_xa_insert;
@@ -1048,13 +1227,11 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
-err_vlan_pop:
if (vlan->pkt_mod_hdr_push_mark)
mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
-err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
-err_vlan_push:
+err_vlan_push_pop:
kvfree(vlan);
return ERR_PTR(err);
}
@@ -1102,6 +1279,50 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
+static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&port->vlans, i, vlan) {
+ mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
+ br_offloads->esw);
+ if (err) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
+ vlan->vid, bridge->vlan_proto, port->vport_num,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_port *port;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port->bridge != bridge)
+ continue;
+
+ err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
@@ -1287,6 +1508,32 @@ int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, boo
return 0;
}
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
+ br_offloads);
+ if (!port)
+ return -EINVAL;
+
+ bridge = port->bridge;
+ if (bridge->vlan_proto == proto)
+ return 0;
+ if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
+ return -EOPNOTSUPP;
+ }
+
+ mlx5_esw_bridge_fdb_flush(bridge);
+ bridge->vlan_proto = proto;
+ mlx5_esw_bridge_vlans_recreate(bridge);
+
+ return 0;
+}
+
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
@@ -1434,7 +1681,8 @@ int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
- vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
+ vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
+ br_offloads->esw);
if (IS_ERR(vlan)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
return PTR_ERR(vlan);
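The cvlan_tag/svlan_tag selection driven by bridge->vlan_proto is repeated verbatim in the ingress, ingress-filter and egress flow-create functions above. A hypothetical consolidation of that repeated block (not part of this patch):

static void example_spec_match_vlan_tag(struct mlx5_flow_spec *rule_spec, u16 vlan_proto)
{
	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
}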
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index efc39975226e..10851a515bca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -26,7 +26,9 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_flow_table *ingress_ft;
struct mlx5_flow_group *ingress_vlan_fg;
- struct mlx5_flow_group *ingress_filter_fg;
+ struct mlx5_flow_group *ingress_vlan_filter_fg;
+ struct mlx5_flow_group *ingress_qinq_fg;
+ struct mlx5_flow_group *ingress_qinq_filter_fg;
struct mlx5_flow_group *ingress_mac_fg;
struct mlx5_flow_table *skip_ft;
@@ -60,6 +62,8 @@ int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsign
struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
new file mode 100644
index 000000000000..2db13c71e88c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include "eswitch.h"
+
+enum vnic_diag_counter {
+ MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
+ MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_CQ_OVERRUN,
+ MLX5_VNIC_DIAG_INVALID_COMMAND,
+ MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND,
+};
+
+static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
+ u32 *val)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
+ struct mlx5_core_dev *dev = vport->dev;
+ u16 vport_num = vport->vport;
+ void *vnic_diag_out;
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
+ if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
+ MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
+ switch (counter) {
+ case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
+ break;
+ case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
+ send_queue_priority_update_flow);
+ break;
+ case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_CQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_INVALID_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
+ break;
+ case MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
+ break;
+ }
+
+ return 0;
+}
+
+static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
+ enum vnic_diag_counter type)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = mlx5_esw_query_vnic_diag(vport, type, &val);
+ if (ret)
+ return ret;
+
+ seq_printf(file, "%d\n", val);
+ return 0;
+}
+
+static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
+}
+
+static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
+}
+
+static int comp_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
+}
+
+static int async_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
+}
+
+static int cq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
+}
+
+static int invalid_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
+}
+
+static int quota_exceeded_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND);
+}
+
+DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
+DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
+DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(cq_overrun);
+DEFINE_SHOW_ATTRIBUTE(invalid_command);
+DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
+
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ debugfs_remove_recursive(vport->dbgfs);
+ vport->dbgfs = NULL;
+}
+
+/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" */
+#define VNIC_DIAG_DIR_NAME_MAX_LEN 8
+
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ struct dentry *vnic_diag;
+ char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return;
+
+ if (vport_num == MLX5_VPORT_PF) {
+ strcpy(dir_name, "pf");
+ } else if (vport_num == MLX5_VPORT_ECPF) {
+ strcpy(dir_name, "ecpf");
+ } else {
+ err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
+ is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
+ if (WARN_ON(err < 0))
+ return;
+ }
+
+ vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
+ vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
+ debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
+ &total_q_under_processor_handle_fops);
+ debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
+ &send_queue_priority_update_flow_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
+ debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
+ &comp_eq_overrun_fops);
+ debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
+ &async_eq_overrun_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
+ debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
+ debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
+ &invalid_command_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
+ debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
+ &quota_exceeded_command_fops);
+}
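The vnic_diag entries created above are ordinary debugfs attributes, so once a vport is loaded they can be read with plain file I/O. A minimal user-space sketch follows; it is an illustration only, not part of the patch. The parent path is an assumption: mlx5_debugfs_get_dev_root() normally resolves to /sys/kernel/debug/mlx5/<PCI BDF>, under which the "esw" directory is created in mlx5_eswitch_init() later in this patch, and "vf_0"/"cq_overrun" are example names taken from the directory and file names created here.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Path is an assumption for illustration; substitute the real BDF. */
	const char *path =
		"/sys/kernel/debug/mlx5/0000:08:00.0/esw/vf_0/vnic_diag/cq_overrun";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cq_overrun: %s", buf);	/* counter is printed as "%d\n" */
	fclose(f);
	return EXIT_SUCCESS;
}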
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 7f9b96d9537e..9bc7be95db54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -87,11 +87,11 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
goto reg_err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -99,7 +99,7 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
reg_err:
mlx5_esw_dl_port_free(dl_port);
return err;
@@ -118,10 +118,10 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
mlx5_esw_dl_port_free(vport->dl_port);
vport->dl_port = NULL;
}
@@ -156,11 +156,11 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
return err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -168,7 +168,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
return err;
}
@@ -182,9 +182,9 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
vport->dl_port = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 719ef26d23c0..6aa58044b949 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
+#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
@@ -1002,6 +1003,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
if (err)
return err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
err = esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
@@ -1009,6 +1011,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
return err;
}
@@ -1016,6 +1019,7 @@ err_rep:
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
@@ -1152,8 +1156,6 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
const u32 *out;
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
if (num_vfs < 0)
return;
@@ -1186,6 +1188,9 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
int total_vports;
int err;
+ if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
+ return 0;
+
total_vports = mlx5_eswitch_get_total_vports(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
@@ -1203,6 +1208,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
} else {
esw_warn(dev, "ingress ACL is not supported by FW\n");
}
+ esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
return 0;
err:
@@ -1215,6 +1221,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
+ esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
mlx5_fs_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
@@ -1224,7 +1231,6 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
- * @mode: Eswitch mode to enable
* @num_vfs: Enable eswitch for given number of VFs. This is optional.
* Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
* Caller should pass num_vfs > 0 when enabling eswitch for
@@ -1238,7 +1244,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
* mode. If num_vfs >=0 is provided, it setup VF related eswitch vports.
* It returns 0 on success or error code on failure.
*/
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
@@ -1257,9 +1263,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- esw->mode = mode;
-
- if (mode == MLX5_ESWITCH_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_rescan_drivers(esw->dev);
@@ -1269,22 +1273,19 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (err)
goto abort;
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
- mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mlx5_esw_mode_change_notify(esw, mode);
+ mlx5_esw_mode_change_notify(esw, esw->mode);
return 0;
abort:
- esw->mode = MLX5_ESWITCH_NONE;
-
- if (mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
-
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1305,14 +1306,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!mlx5_esw_allowed(esw))
return 0;
- toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+ devl_assert_locked(priv_to_devlink(esw->dev));
+
+ toggle_lag = !mlx5_esw_is_fdb_created(esw);
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- if (esw->mode == MLX5_ESWITCH_NONE) {
- ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ if (!mlx5_esw_is_fdb_created(esw)) {
+ ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
@@ -1330,55 +1333,81 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return ret;
}
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+/* When disabling sriov, free driver level resources. */
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
- int old_mode;
-
- lockdep_assert_held_write(&esw->mode_lock);
-
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_allowed(esw))
return;
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ devl_assert_locked(priv_to_devlink(esw->dev));
+ down_write(&esw->mode_lock);
+ /* If driver is unloaded, this function is called twice by remove_one()
+ * and mlx5_unload(). Prevent the second call.
+ */
+ if (!esw->esw_funcs.num_vfs && !clear_vf)
+ goto unlock;
+
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- /* Notify eswitch users that it is exiting from current mode.
- * So that it can do necessary cleanup before the eswitch is disabled.
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ /* If disabling sriov in switchdev mode, free meta rules here
+ * because it depends on num_vfs.
*/
- mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ struct devlink *devlink = priv_to_devlink(esw->dev);
- mlx5_eswitch_event_handlers_unregister(esw);
+ esw_offloads_del_send_to_vport_meta_rules(esw);
+ devl_rate_nodes_destroy(devlink);
+ }
- if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
+ esw->esw_funcs.num_vfs = 0;
- old_mode = esw->mode;
- esw->mode = MLX5_ESWITCH_NONE;
+unlock:
+ up_write(&esw->mode_lock);
+}
- if (old_mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
+/* Free resources for corresponding eswitch mode. It is called by devlink
+ * when changing eswitch mode or modprobe when unloading driver.
+ */
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+
+ /* Notify eswitch users that it is exiting from current mode.
+ * So that it can do necessary cleanup before the eswitch is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- devlink_rate_nodes_destroy(devlink);
+ mlx5_eswitch_event_handlers_unregister(esw);
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
+
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
mlx5_esw_acls_ns_cleanup(esw);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ devl_rate_nodes_destroy(devlink);
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_allowed(esw))
return;
+ devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- mlx5_eswitch_disable_locked(esw, clear_vf);
- esw->esw_funcs.num_vfs = 0;
+ mlx5_eswitch_disable_locked(esw);
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -1573,7 +1602,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_NONE;
+ esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1587,6 +1616,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+ esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
@@ -1610,6 +1640,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ debugfs_remove_recursive(esw->dbgfs);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
@@ -1875,7 +1906,7 @@ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -1995,8 +2026,6 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
*/
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
- if (!mlx5_esw_allowed(esw))
- return;
up_write(&esw->mode_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2754a732914d..87ce5a208cb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -191,6 +191,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
+ struct dentry *dbgfs;
};
struct mlx5_esw_indir_table;
@@ -282,10 +283,15 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
+ MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};
struct mlx5_esw_bridge_offloads;
+enum {
+ MLX5_ESW_FDB_CREATED = BIT(0),
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -331,12 +337,14 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
+ struct dentry *dbgfs;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
@@ -350,10 +358,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -575,6 +584,11 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
return dl_port_index & 0xffff;
}
+static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
+}
+
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
@@ -672,6 +686,9 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num);
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
@@ -719,7 +736,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2ce3728576d1..ed73132129aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
}
static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
- struct mlx5_flow_act *flow_act,
- struct mlx5_fs_chains *chains,
- int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains, int i)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, int i)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
- esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+ } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, esw, *i);
+ (*i)++;
+ } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+ esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
@@ -512,6 +523,20 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
}
}
+static void
+esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = attr->meter_attr.meter;
+ flow_act->exe_aso.type = attr->exe_aso_type;
+ flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
+ flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
+ /* use metadata reg 5 for packet color */
+ flow_act->exe_aso.return_reg_id = 5;
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -579,6 +604,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
+ esw_setup_meter(attr, &flow_act);
+
if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
@@ -1040,6 +1069,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
+ /* If changing eswitch mode from switchdev to legacy, but num_vfs is not 0,
+ * meta rules could be freed again. So set it to NULL.
+ */
+ esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
+}
+
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
}
static int
@@ -2034,7 +2072,7 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2170,18 +2208,18 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- esw->dev->priv.sriov.num_vfs);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
}
+ mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -2894,7 +2932,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
int err = 0;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_NONE) {
+ if (mlx5_esw_is_fdb_created(esw)) {
err = -EBUSY;
goto done;
}
@@ -3055,6 +3093,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
+ struct devlink *devlink;
bool host_pf_disabled;
u16 new_num_vfs;
@@ -3066,6 +3105,8 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
return;
+ devlink = priv_to_devlink(esw->dev);
+ devl_lock(devlink);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -3078,6 +3119,7 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
return;
}
esw->esw_funcs.num_vfs = new_num_vfs;
+ devl_unlock(devlink);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
@@ -3229,13 +3271,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -3334,36 +3375,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
-{
- /* devlink commands in NONE eswitch mode are currently supported only
- * on ECPF.
- */
- return (esw->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
-}
-
-/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
- * is never correct and prone to races. It's a transitional workaround,
- * never repeat this pattern.
- *
- * This code MUST be fixed before removing devlink_mutex as it is safe
- * to do only because of that mutex.
- */
-static void mlx5_eswtich_mode_callback_enter(struct devlink *devlink,
- struct mlx5_eswitch *esw)
-{
- devl_unlock(devlink);
- down_write(&esw->mode_lock);
-}
-
-static void mlx5_eswtich_mode_callback_exit(struct devlink *devlink,
- struct mlx5_eswitch *esw)
-{
- up_write(&esw->mode_lock);
- devl_lock(devlink);
-}
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3378,15 +3389,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- /* FIXME: devl_unlock() followed by devl_lock() inside driver callback
- * is never correct and prone to races. It's a transitional workaround,
- * never repeat this pattern.
- *
- * This code MUST be fixed before removing devlink_mutex as it is safe
- * to do only because of that mutex.
- */
- devl_unlock(devlink);
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3399,6 +3401,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -3409,6 +3412,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
+ mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
@@ -3417,7 +3421,6 @@ unlock:
mlx5_esw_unlock(esw);
enable_lag:
mlx5_lag_enable_change(esw->dev);
- devl_lock(devlink);
return err;
}
@@ -3430,14 +3433,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3484,10 +3482,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto out;
+ down_write(&esw->mode_lock);
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -3521,11 +3516,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
goto out;
esw->offloads.inline_mode = mlx5_mode;
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return 0;
out:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3538,14 +3533,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3555,16 +3545,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
- int err;
+ int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
+ down_write(&esw->mode_lock);
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
@@ -3607,7 +3594,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3615,21 +3602,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
*encap = esw->offloads.encap;
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
- return err;
+ up_write(&esw->mode_lock);
+ return 0;
}
static bool
@@ -3752,12 +3733,14 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err)
goto devlink_err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
goto rep_err;
return 0;
rep_err:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
mlx5_esw_vport_disable(esw, vport_num);
@@ -3767,6 +3750,7 @@ devlink_err:
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 2ccf7bef9b05..735dc805dad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -479,6 +479,30 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
return 0;
}
+
+static void
+mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
+{
+ void *exe_aso_ctrl;
+ void *execute_aso;
+
+ execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
+ execute_aso[0]);
+ MLX5_SET(execute_aso, execute_aso, valid, 1);
+ MLX5_SET(execute_aso, execute_aso, aso_object_id,
+ fte->action.exe_aso.object_id);
+
+ exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
+ fte->action.exe_aso.return_reg_id);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
+ fte->action.exe_aso.type);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
+ fte->action.exe_aso.flow_meter.init_color);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
+ fte->action.exe_aso.flow_meter.meter_idx);
+}
+
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
@@ -663,6 +687,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
kvfree(in);
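The execute_aso handling added to mlx5_cmd_set_fte() above only consumes fields that a steering caller has already placed in mlx5_flow_act; esw_setup_meter() in eswitch_offloads.c (earlier in this patch) is the in-tree caller. A condensed sketch of that population is shown below for reference; it is not part of the patch, and the object id and meter index are placeholder values that would come from a previously created flow-meter ASO object.

static void example_set_flow_meter_action(struct mlx5_flow_act *flow_act,
					  u32 meter_obj_id, u8 meter_idx)
{
	/* Mirrors esw_setup_meter(): mlx5_cmd_set_fte_flow_meter() copies
	 * these exe_aso fields into the flow context of the FTE.
	 */
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
	flow_act->exe_aso.type = MLX5_EXE_ASO_FLOW_METER;
	flow_act->exe_aso.object_id = meter_obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter_idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	flow_act->exe_aso.return_reg_id = 5; /* metadata register carrying packet color */
}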
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 21e5c709b2d3..f1b908d40fa6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2895,6 +2895,14 @@ static int create_fdb_bypass(struct mlx5_flow_steering *steering)
return 0;
}
+static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *maj_prio;
@@ -2945,10 +2953,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
return 0;
out_err:
- cleanup_root_ns(steering->fdb_root_ns);
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- steering->fdb_root_ns = NULL;
+ cleanup_fdb_root_ns(steering);
return err;
}
@@ -3108,10 +3113,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->fdb_root_ns);
- steering->fdb_root_ns = NULL;
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
+ cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index cfb8bedba512..079fa44ada71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
+ dev->priv.sw_vhca_id > 0)
+ MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
+
return mlx5_cmd_exec_in(dev, init_hca, in);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 052af4901c0b..e8896f368362 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -149,6 +149,9 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
+ mlx5_unload_one(dev);
+ if (mlx5_health_wait_pci_up(dev))
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -183,15 +186,9 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
reset_reload_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- int err;
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
- err = mlx5_health_wait_pci_up(dev);
- if (err)
- mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
}
@@ -395,7 +392,6 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 659021c31cbd..2cf2c9948446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -666,16 +666,20 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
struct mlx5_fw_reporter_ctx fw_reporter_ctx;
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
health = container_of(work, struct mlx5_core_health, fatal_report_work);
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
+ devlink = priv_to_devlink(dev);
enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ devl_lock(devlink);
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
+ devl_unlock(devlink);
return;
}
fw_reporter_ctx.err_synd = health->synd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 8da73ef5680f..ac3757beaea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -83,7 +83,7 @@ static void mlx5i_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int mlx5i_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0a99a020a3b2..c02b7b08fb4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EINVAL;
err = mlx5e_arfs_create_tables(priv);
@@ -364,9 +364,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
mlx5e_create_q_counters(priv);
@@ -397,6 +406,8 @@ err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -408,6 +419,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+ mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 5d41e19378e0..0f34e3c80d1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -638,6 +638,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
+ struct mlx5_core_dev *dev;
u8 mode;
#endif
int i;
@@ -647,11 +648,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
-
- if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS)
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
return false;
+ mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
@@ -766,8 +767,7 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
#ifdef CONFIG_MLX5_ESWITCH
for (i = 0; i < ldev->ports; i++)
- roce_lag = roce_lag &&
- ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
return roce_lag;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
new file mode 100644
index 000000000000..21e14507ff5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/transobj.h>
+#include "aso.h"
+#include "wq.h"
+
+struct mlx5_aso_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5_aso_cq cq;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+ void *cqc_data, struct mlx5_aso_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_vector2eqn(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+ struct mlx5_aso_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = create_aso_cq(cq, cqc_data);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+ goto err_free_cq;
+ }
+
+ kvfree(cqc_data);
+ return 0;
+
+err_free_cq:
+ mlx5_aso_free_cq(cq);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ int err;
+
+ err = create_aso_sq(mdev, pdn, sqc_data, sq);
+ if (err)
+ return err;
+
+ err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+
+ return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+ u32 pdn, struct mlx5_aso *sq)
+{
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, pdn);
+ MLX5_SET(wq, wq, log_wq_sz, 1);
+
+ err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+ goto err_free_asosq;
+ }
+
+ mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_free_asosq:
+ mlx5_aso_free_sq(sq);
+err_out:
+ kvfree(sqc_data);
+ return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ struct mlx5_aso *aso;
+ int err;
+
+ aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+ if (!aso)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+ if (err)
+ goto err_cq;
+
+ err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+ if (err)
+ goto err_sq;
+
+ return aso;
+
+err_sq:
+ mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+ kfree(aso);
+ return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+ if (IS_ERR_OR_NULL(aso))
+ return;
+
+ mlx5_aso_destroy_sq(aso);
+ mlx5_aso_destroy_cq(&aso->cq);
+ kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+ (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_ACCESS_ASO);
+ cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->general_id = cpu_to_be32(obj_id);
+}
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+ return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+ doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ if (with_data)
+ aso->pc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->pc += MLX5_ASO_WQEBBS;
+ *aso->wq.db = cpu_to_be32(aso->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+{
+ struct mlx5_aso_cq *cq = &aso->cq;
+ struct mlx5_cqe64 *cqe;
+ unsigned long expires;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+
+ expires = jiffies + msecs_to_jiffies(interval_ms);
+ while (!cqe && time_is_after_jiffies(expires)) {
+ usleep_range(2, 10);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ }
+
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe;
+
+ mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
+ err_cqe = (struct mlx5_err_cqe *)cqe;
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+ err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n",
+ err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (with_data)
+ aso->cc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->cc += MLX5_ASO_WQEBBS;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
new file mode 100644
index 000000000000..b3bbf284fe71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <linux/mlx5/qp.h>
+#include "mlx5_core.h"
+
+#define MLX5_ASO_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+
+struct mlx5_wqe_aso_ctrl_seg {
+ __be32 va_h;
+ __be32 va_l; /* include read_enable */
+ __be32 l_key;
+ u8 data_mask_mode;
+ u8 condition_1_0_operand;
+ u8 condition_1_0_offset;
+ u8 data_offset_condition_operand;
+ __be32 condition_0_data;
+ __be32 condition_0_mask;
+ __be32 condition_1_data;
+ __be32 condition_1_mask;
+ __be64 bitwise_data;
+ __be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+ __be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+ struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+ MLX5_ASO_LOGICAL_AND,
+ MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+ MLX5_ASO_ALWAYS_FALSE,
+ MLX5_ASO_ALWAYS_TRUE,
+ MLX5_ASO_EQUAL,
+ MLX5_ASO_NOT_EQUAL,
+ MLX5_ASO_GREATER_OR_EQUAL,
+ MLX5_ASO_LESSER_OR_EQUAL,
+ MLX5_ASO_LESSER,
+ MLX5_ASO_GREATER,
+ MLX5_ASO_CYCLIC_GREATER,
+ MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+ MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+ MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+ MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+};
+
+struct mlx5_aso;
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
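No caller of the new ASO helpers appears in this patch, so a minimal usage sketch is included here for orientation; it is an illustration, not driver code. The ds_cnt computation (MLX5_SEND_WQE_DS as the 16-byte data-segment unit) and the single-shot create/post/poll/destroy sequence are assumptions; a real consumer would keep the mlx5_aso object around, fill wqe->aso_ctrl with the desired condition and data-mask fields, and handle errors in its own flow. The sketch assumes "lib/aso.h" is included.

static int example_aso_cycle(struct mlx5_core_dev *mdev, u32 pdn, u32 obj_id)
{
	struct mlx5_aso_wqe *wqe;
	struct mlx5_aso *aso;
	u8 ds_cnt;
	int err;

	aso = mlx5_aso_create(mdev, pdn);
	if (IS_ERR(aso))
		return PTR_ERR(aso);

	wqe = mlx5_aso_get_wqe(aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);

	/* Fill wqe->aso_ctrl (conditions, data mask, ...) before posting. */

	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
	err = mlx5_aso_poll_cq(aso, false, 10 /* ms timeout */);

	mlx5_aso_destroy(aso);
	return err;
}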
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 3d5e57ff558c..9482e51ac82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -12,13 +12,16 @@ struct mlx5_dm {
spinlock_t lock;
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
+ unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
+ u64 header_modify_pattern_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
+ bool support_v2;
if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
return NULL;
@@ -35,8 +38,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->steering_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(steering_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(steering_icm_blocks, GFP_KERNEL);
if (!dm->steering_sw_icm_alloc_blocks)
goto err_steering;
}
@@ -47,16 +49,33 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->header_modify_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(header_modify_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_sw_icm_alloc_blocks)
goto err_modify_hdr;
}
+ support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
+ MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
+
+ if (support_v2) {
+ header_modify_pattern_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_modify_pattern_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
+ if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
return dm;
+err_pattern:
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+
err_modify_hdr:
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
err_steering:
kfree(dm);
@@ -75,7 +94,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
}
if (dm->header_modify_sw_icm_alloc_blocks) {
@@ -83,7 +102,15 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
BIT(MLX5_CAP_DEV_MEM(dev,
log_header_modify_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->header_modify_sw_icm_alloc_blocks);
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+ }
+
+ if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_modify_pattern_sw_icm_alloc_blocks);
}
kfree(dm);
@@ -130,6 +157,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_sw_icm_size);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -203,6 +237,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
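
The allocation changes above swap the open-coded kcalloc(BITS_TO_LONGS(...)) pattern for the dedicated bitmap helpers. A minimal sketch of the equivalence, not part of the patch itself (nbits is an arbitrary example size):

        #include <linux/bitmap.h>
        #include <linux/slab.h>

        static void bitmap_alloc_example(unsigned int nbits)
        {
                /* Old form: enough zeroed unsigned longs to hold nbits bits. */
                unsigned long *old_map = kcalloc(BITS_TO_LONGS(nbits),
                                                 sizeof(unsigned long), GFP_KERNEL);
                /* New form: same zero-initialized bitmap of nbits bits. */
                unsigned long *new_map = bitmap_zalloc(nbits, GFP_KERNEL);

                kfree(old_map);         /* pairs with kcalloc() */
                bitmap_free(new_map);   /* pairs with bitmap_zalloc() */
        }

Both allocators return NULL on failure, so the error handling in mlx5_dm_create() is unchanged.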
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index d758848d34d0..696e45e2bd06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
+int mlx5_tout_init(struct mlx5_core_dev *dev)
{
int i;
- for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
- tout_set(dev, tout_def_sw_val[i], i);
-}
-
-int mlx5_tout_init(struct mlx5_core_dev *dev)
-{
dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
if (!dev->timeouts)
return -ENOMEM;
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+
return 0;
}
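
Folding the default-value loop into mlx5_tout_init() removes the separate mlx5_tout_set_def_val() step, so callers get software defaults as soon as init succeeds. A simplified sketch of the resulting call site (not the exact driver code):

        err = mlx5_tout_init(dev);
        if (err)
                return err;
        /* dev->timeouts->to[] already holds tout_def_sw_val[] here;
         * firmware-provided values can still overwrite them later via
         * mlx5_tout_query_iseg() / mlx5_tout_query_dtor().
         */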
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 257c03eeab36..bc9e9aeda847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c9b4e50a593e..bec8d6d0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static u32 sw_owner_id[4];
+#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
+static DEFINE_IDA(sw_vhca_ida);
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
@@ -314,13 +316,6 @@ struct mlx5_reg_host_endianness {
u8 rsvd[15];
};
-#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
-
-enum {
- MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
- MLX5_DEV_CAP_FLAG_DCT,
-};
-
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
@@ -499,6 +494,31 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ if (err)
+ return err;
+
+ if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
+ !(dev->priv.sw_vhca_id > 0))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
+ MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
+
+ return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
@@ -524,7 +544,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
+ prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -669,6 +689,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_2(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
+ goto out;
+ }
+
out:
kfree(set_ctx);
return err;
@@ -1023,8 +1050,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- mlx5_tout_set_def_val(dev);
-
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
@@ -1257,6 +1282,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
@@ -1276,8 +1302,10 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
int mlx5_init_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
int err = 0;
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
@@ -1306,6 +1334,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto err_register;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return 0;
err_register:
@@ -1320,11 +1349,15 @@ function_teardown:
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return err;
}
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
@@ -1343,13 +1376,15 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
int err = 0;
u64 timeout;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1391,8 +1426,20 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int ret;
+
+ devl_lock(devlink);
+ ret = mlx5_load_one_devl_locked(dev, recovery);
+ devl_unlock(devlink);
+ return ret;
+}
+
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
mlx5_detach_device(dev);
@@ -1410,6 +1457,15 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
+ mlx5_unload_one_devl_locked(dev);
+ devl_unlock(devlink);
+}
+
static const int types[] = {
MLX5_CAP_GENERAL,
MLX5_CAP_GENERAL_2,
@@ -1512,6 +1568,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ /* The conjunction of sw_vhca_id with sw_owner_id will be a global
+ * unique id per function which uses mlx5_core.
+ * Those values are supplied to FW as part of the init HCA command to
+ * be used by both driver and FW when it's applicable.
+ */
+ dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
+ MAX_SW_VHCA_ID,
+ GFP_KERNEL);
+ if (dev->priv.sw_vhca_id < 0)
+ mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
+ dev->priv.sw_vhca_id);
+
return 0;
err_hca_caps:
@@ -1536,6 +1604,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ if (priv->sw_vhca_id > 0)
+ ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
@@ -1859,7 +1930,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one_devl_locked(dev);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1870,7 +1941,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
return -EIO;
}
- return mlx5_load_one(dev, true);
+ return mlx5_load_one_devl_locked(dev, true);
}
static struct pci_driver mlx5_core_driver = {
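
The sw_vhca_id assigned in mlx5_mdev_init() comes from a driver-global IDA, so each mlx5_core function gets a distinct non-zero value in [1, MAX_SW_VHCA_ID]. A standalone sketch of that allocation pattern (only ida_alloc_range()/ida_free() are real kernel APIs; the other names are illustrative):

        #include <linux/idr.h>

        static DEFINE_IDA(example_ida);

        /* Returns an unused id in [1, 0xfff], or a negative errno on failure. */
        static int example_id_get(void)
        {
                return ida_alloc_range(&example_ida, 1, 0xfff, GFP_KERNEL);
        }

        static void example_id_put(int id)
        {
                if (id > 0)
                        ida_free(&example_ida, id);
        }

As in mlx5_mdev_init(), an allocation failure is only logged; the device still loads, it just never sets sw_vhca_id_valid in handle_hca_cap_2().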
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9cc7afea2758..ad61b86d5769 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -290,7 +290,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
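
mlx5_load_one()/mlx5_unload_one() keep taking the devlink instance lock themselves, while the new *_devl_locked variants are meant for paths that already run under that lock (devlink reload, health recovery, mlx5_disable_device()). An illustrative caller-side sketch:

        /* Outside any devlink callback: the wrapper takes the lock. */
        err = mlx5_load_one(dev, false);

        /* Inside a devlink op or the recovery path the lock is already
         * held, so the locked variant is used directly.
         */
        devl_assert_locked(priv_to_devlink(dev));
        err = mlx5_load_one_devl_locked(dev, true);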
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3be659cd91f1..7d955a4d9f14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -501,7 +501,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
case MLX5_ESWITCH_OFFLOADS:
mlx5_sf_table_enable(table);
break;
- case MLX5_ESWITCH_NONE:
+ case MLX5_ESWITCH_LEGACY:
mlx5_sf_table_disable(table);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2935614f6fa9..ee2e1b7c1310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -145,8 +145,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
sriov->vfs_ctx[vf].enabled = 0;
}
- if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -155,13 +154,16 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
@@ -174,10 +176,13 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
+ devl_lock(devlink);
mlx5_device_disable_sriov(dev, num_vfs, true);
+ devl_unlock(devlink);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 1383550f44c1..b1dfad274a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -22,6 +22,7 @@ enum dr_action_valid_state {
DR_ACTION_STATE_PUSH_VLAN,
DR_ACTION_STATE_NON_TERM,
DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_ASO,
DR_ACTION_STATE_MAX,
};
@@ -42,6 +43,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+ [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -71,6 +73,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -85,6 +88,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -93,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -105,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -118,6 +124,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
@@ -128,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -145,6 +153,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -163,18 +178,21 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -185,6 +203,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -196,6 +215,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -206,6 +226,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -219,6 +240,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -240,6 +271,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -253,6 +285,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -261,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -272,6 +306,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -284,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -296,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -312,6 +349,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -331,6 +375,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -338,6 +383,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -345,6 +391,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -356,6 +403,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -368,6 +416,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -379,6 +428,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -393,6 +443,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -738,6 +799,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ attr.aso_flow_meter.obj_id = action->aso->obj_id;
+ attr.aso_flow_meter.offset = action->aso->offset;
+ attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
+ attr.aso_flow_meter.init_color = action->aso->init_color;
+ break;
default:
mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
return -EINVAL;
@@ -798,6 +865,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
+ [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
};
static struct mlx5dr_action *
@@ -1830,6 +1898,34 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return action;
}
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
+ u8 dest_reg_id, u8 aso_type,
+ u8 init_color, u8 meter_id)
+{
+ struct mlx5dr_action *action;
+
+ if (aso_type != MLX5_EXE_ASO_FLOW_METER)
+ return NULL;
+
+ if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
+ return NULL;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
+ if (!action)
+ return NULL;
+
+ action->aso->obj_id = obj_id;
+ action->aso->offset = meter_id;
+ action->aso->dest_reg_id = dest_reg_id;
+ action->aso->init_color = init_color;
+ action->aso->dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
@@ -1881,6 +1977,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_SAMPLER:
refcount_dec(&action->sampler->dmn->refcount);
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ refcount_dec(&action->aso->dmn->refcount);
+ break;
default:
break;
}
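
next_action_state[][][] is a per-domain state machine indexed by the current build state and the next action type; entries left at their zero default reject the sequence. A simplified sketch of the lookup that consumes the new DR_ACTION_STATE_ASO rows (not the exact helper in dr_action.c):

        u32 next;

        next = next_action_state[action_domain][state][action_type];
        if (!next)              /* transition not listed in the table above */
                return -EOPNOTSUPP;
        state = next;           /* e.g. DR_ACTION_TYP_ASO_FLOW_METER moves most
                                 * states to DR_ACTION_STATE_ASO
                                 */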
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index d5998ef59be4..7adcf0eec13b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -21,10 +21,11 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_TABLE_TX = 3102,
DR_DUMP_REC_TYPE_MATCHER = 3200,
- DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+ DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+ DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
DR_DUMP_REC_TYPE_RULE = 3300,
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id);
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id);
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
break;
case DR_ACTION_TYP_CTR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index fcb962c6db2e..ee677a5c76be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -89,6 +89,7 @@ enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
@@ -129,6 +130,10 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -494,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
+ u32 object_id,
+ u32 offset,
+ u8 dest_reg_id,
+ u8 init_color)
+{
+ MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_ASO);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
+ object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
+ /* Convert reg_c index to HW 64bit index */
+ MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
+ (dest_reg_id - 1) / 2);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
+ offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
+ init_color);
+}
+
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -629,6 +655,21 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -802,6 +843,21 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
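
dr_ste_v1_set_aso_flow_meter() packs MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ (two) meters per ASO object and converts the reg_c destination index to the hardware's 64-bit register numbering. A worked example with illustrative values:

        /* obj_id = 100, offset = 5, dest_reg_id = 3:
         *   aso_context_number = 100 + 5 / 2 = 102
         *   flow_meter.line_id = 5 % 2       = 1   (second meter in object 102)
         *   dest_reg_id (HW 64-bit index)    = (3 - 1) / 2 = 1
         */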
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 98320e3945ad..feed227944b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -127,6 +127,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
+ DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_MAX,
};
@@ -271,6 +272,13 @@ struct mlx5dr_ste_actions_attr {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
+
+ struct {
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+ } aso_flow_meter;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1035,6 +1043,14 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
+struct mlx5dr_action_aso_flow_meter {
+ struct mlx5dr_domain *dmn;
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1049,6 +1065,7 @@ struct mlx5dr_action {
struct mlx5dr_action_vport *vport;
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
+ struct mlx5dr_action_aso_flow_meter *aso;
};
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 6a9abba92df6..75672663359d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -500,6 +500,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action =
+ mlx5dr_action_create_aso(domain,
+ fte->action.exe_aso.object_id,
+ fte->action.exe_aso.return_reg_id,
+ fte->action.exe_aso.type,
+ fte->action.exe_aso.flow_meter.init_color,
+ fte->action.exe_aso.flow_meter.meter_idx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
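
The new branch translates a flow-meter EXECUTE_ASO flow-table entry into a SW-steering action. A sketch of the FTE fields that would exercise it; the field names come from the hunk above, the values are arbitrary examples:

        fte->action.action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
        fte->action.exe_aso.type = MLX5_EXE_ASO_FLOW_METER;
        fte->action.exe_aso.object_id = aso_obj_id;     /* flow meter ASO object */
        fte->action.exe_aso.return_reg_id = 2;          /* reg_c receiving the color */
        fte->action.exe_aso.flow_meter.meter_idx = 0;   /* meter within the object */
        fte->action.exe_aso.flow_meter.init_color = init_color;

Any other exe_aso.type is rejected with -EOPNOTSUPP, and a NULL return from mlx5dr_action_create_aso() is reported as -ENOMEM.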
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 9604b2091358..fb078fa0f0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits {
u8 reserved_at_38[0x8];
};
+enum {
+ MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
+};
+
+struct mlx5_ifc_ste_aso_flow_meter_action_bits {
+ u8 reserved_at_0[0xc];
+ u8 action[0x1];
+ u8 initial_color[0x2];
+ u8 line_id[0x1];
+};
+
+struct mlx5_ifc_ste_double_action_aso_v1_bits {
+ u8 action_id[0x8];
+ u8 aso_context_number[0x18];
+
+ u8 dest_reg_id[0x2];
+ u8 change_ordering_tag[0x1];
+ u8 aso_check_ordering[0x1];
+ u8 aso_context_type[0x4];
+ u8 reserved_at_28[0x8];
+ union {
+ u8 aso_fields[0x10];
+ struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
+ };
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7626c85643b1..ecffc2cbe6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -131,6 +131,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
+ u32 obj_id,
+ u8 return_reg_id,
+ u8 aso_type,
+ u8 init_color,
+ u8 meter_id);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ac020cb78072..d5c317325030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
goto free;
MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
- nic_vport_context.affiliated_vhca_id,
- MLX5_CAP_GEN(master_mdev, vhca_id));
+ if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
+ } else {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ }
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 84621b4cb15b..b03e1c66bac0 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -19,8 +19,6 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-#define DRV_NAME "mlxbf_gige"
-
/* Allocate SKB whose payload pointer aligns with the Bluefield
* hardware DMA limitation, i.e. DMA operation can't cross
* a 4KB boundary. A maximum packet size of 2KB is assumed in the
@@ -427,7 +425,7 @@ static struct platform_driver mlxbf_gige_driver = {
.remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 4683312861ac..a510bf2cff2f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -7,6 +7,7 @@ config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
select MLXFW
+ select AUXILIARY_BUS
help
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 1a465fd5d8b3..3ca9fce759ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
core_acl_flex_actions.o core_env.o \
- core_linecards.o
+ core_linecards.o core_linecard_dev.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
@@ -12,7 +12,6 @@ mlxsw_i2c-objs := i2c.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_router_xm.o \
spectrum1_kvdl.o spectrum2_kvdl.o \
spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
@@ -29,7 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
- spectrum_ethtool.o spectrum_policer.o
+ spectrum_ethtool.o spectrum_policer.o \
+ spectrum_pgt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 51b260d54237..60232fb8ccd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -329,6 +329,32 @@ MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);
+/* cmd_mbox_query_fw_utc_sec_offset
+ * The offset of the UTC_Sec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_sec_offset, 0x70, 0, 64);
+
+/* cmd_mbox_query_fw_utc_sec_bar
+ * PCI base address register (BAR) of the UTC_Sec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_sec_bar, 0x78, 30, 2);
+
+/* cmd_mbox_query_fw_utc_nsec_offset
+ * The offset of the UTC_nSec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_nsec_offset, 0x80, 0, 64);
+
+/* cmd_mbox_query_fw_utc_nsec_bar
+ * PCI base address register (BAR) of the UTC_nSec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_nsec_bar, 0x88, 30, 2);
+
/* QUERY_BOARDINFO - Query Board Information
* -----------------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -343,23 +369,6 @@ static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}
-/* cmd_mbox_xm_num_local_ports
- * Number of local_ports connected to the xm.
- * Each local port is a 4x
- * Spectrum-2/3: 25G
- * Spectrum-4: 50G
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_num_local_ports, 0x00, 4, 3);
-
-/* cmd_mbox_xm_exists
- * An XM (eXtanded Mezanine, e.g. used for the XLT) is connected on the board.
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_exists, 0x00, 0, 1);
-
-/* cmd_mbox_xm_local_port_entry
- */
-MLXSW_ITEM_BIT_ARRAY(cmd_mbox, boardinfo, xm_local_port_entry, 0x04, 4, 8);
-
/* cmd_mbox_boardinfo_intapin
* When PCIe interrupt messages are being used, this value is used for clearing
* an interrupt. When using MSI-X, this register is not used.
@@ -650,6 +659,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_ubridge
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
+
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
@@ -674,11 +689,11 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
-/* cmd_mbox_config_set_kvh_xlt_cache_mode
+/* cmd_mbox_config_set_cqe_time_stamp_type
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, set_kvh_xlt_cache_mode, 0x08, 3, 1);
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
@@ -736,16 +751,25 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+enum mlxsw_cmd_mbox_config_profile_flood_mode {
+ /* Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset
+ * tables. max_fid_flood_tables indicates the number of per-FID tables.
+ * Reserved when unified bridge model is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
+ /* Controlled flood tables. Reserved when legacy bridge model is
+ * used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+};
+
/* cmd_mbox_config_profile_flood_mode
* Flooding mode to use.
- * 0-2 - Backward compatible modes for SwitchX devices.
- * 3 - Mixed mode, where:
- * max_flood_tables indicates the number of single-entry tables.
- * max_vid_flood_tables indicates the number of per-VID tables.
- * max_fid_offset_flood_tables indicates the number of FID-offset tables.
- * max_fid_flood_tables indicates the number of per-FID tables.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);
/* cmd_mbox_config_profile_max_fid_offset_flood_tables
* Maximum number of FID-offset flooding tables.
@@ -806,12 +830,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
-/* cmd_mbox_config_profile_kvh_xlt_cache_mode
- * KVH XLT cache mode:
- * 0 - XLT can use all KVH as best-effort
- * 1 - XLT cache uses 1/2 KVH
+/* cmd_mbox_config_profile_ubridge
+ * Unified Bridge
+ * 0 - non unified bridge
+ * 1 - unified bridge
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, kvh_xlt_cache_mode, 0x50, 8, 4);
+MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
@@ -866,6 +890,26 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
0x60, 0, 8, 0x08, 0x00, false);
+enum mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type {
+ /* uSec - 1.024uSec (default). Only bits 15:0 are valid. */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_USEC,
+ /* FRC - Free Running Clock, units of 1nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_FRC,
+ /* UTC. time_stamp[37:30] = Sec, time_stamp[29:0] = nSec.
+ * Reserved when SwitchX/2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* cmd_mbox_config_profile_cqe_time_stamp_type
+ * CQE time_stamp_type for non-mirror-packets.
+ * Configured if set_cqe_time_stamp_type is set.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_time_stamp_type, 0xB0, 8, 2);
+
/* cmd_mbox_config_profile_cqe_version
* CQE version:
* 0: CQE version is 0
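
Each config_profile field is paired with a set_* capability bit, and the MLXSW_ITEM32() definitions above generate _set() accessors for both. A sketch of how the new unified-bridge and CQE time-stamp knobs would be composed into the mailbox (mirroring the existing pci.c convention, not a copy of that code):

        if (profile->used_ubridge) {
                mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
                mlxsw_cmd_mbox_config_profile_ubridge_set(mbox, profile->ubridge);
        }
        if (profile->used_cqe_time_stamp_type) {
                mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox, 1);
                mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
                                profile->cqe_time_stamp_type);
        }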
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index fc52832241b3..75553eb2c7f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -127,11 +127,11 @@ static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
max_ports, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
- max_ports, MLXSW_CORE_RESOURCE_PORTS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &ports_num_params);
+ return devl_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
}
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
@@ -157,8 +157,8 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
goto err_resources_ports_register;
}
atomic_set(&mlxsw_core->active_ports_count, 0);
- devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
- mlxsw_ports_occ_get, mlxsw_core);
+ devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
return 0;
@@ -171,9 +171,9 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
if (!reload)
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
kfree(mlxsw_core->ports);
}
@@ -951,6 +951,20 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
struct mlxsw_core_fw_info {
struct mlxfw_dev mlxfw_dev;
struct mlxsw_core *mlxsw_core;
@@ -1105,8 +1119,9 @@ static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
.fsm_release = mlxsw_core_fw_fsm_release,
};
-static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
- struct netlink_ext_ack *extack)
+static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core_fw_info mlxsw_core_fw_info = {
.mlxfw_dev = {
@@ -1117,13 +1132,9 @@ static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmw
},
.mlxsw_core = mlxsw_core
};
- int err;
- mlxsw_core->fw_flash_in_progress = true;
- err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
- mlxsw_core->fw_flash_in_progress = false;
-
- return err;
+ return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
+ firmware, extack);
}
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
@@ -1169,7 +1180,7 @@ static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
release_firmware(firmware);
if (err)
dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
@@ -1187,7 +1198,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
+ return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
}
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
@@ -1498,13 +1509,15 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_re
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ int err;
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
- return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
- mlxsw_core->bus,
- mlxsw_core->bus_priv, true,
- devlink, extack);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink, extack);
+ return err;
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -2102,6 +2115,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err = -ENOMEM;
goto err_devlink_alloc;
}
+ devl_lock(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2187,6 +2201,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!reload) {
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
}
return 0;
@@ -2214,12 +2229,14 @@ err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- if (!reload)
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
err_devlink_alloc:
return err;
}
@@ -2255,8 +2272,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload)
+ if (!reload) {
devlink_unregister(devlink);
+ devl_lock(devlink);
+ }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2281,16 +2300,19 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- if (!reload)
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
return;
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
@@ -3151,18 +3173,6 @@ mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
return mlxsw_core_port->linecard;
}
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
-{
- const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
- int i;
-
- for (i = 0; i < bus_info->xm_local_ports_count; i++)
- if (bus_info->xm_local_ports[i] == local_port)
- return true;
- return false;
-}
-EXPORT_SYMBOL(mlxsw_core_port_is_xm);
-
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
bool (*selector)(void *priv, u16 local_port),
void *priv)
@@ -3321,6 +3331,24 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->emad.enable_string_tlv = true;
@@ -3331,9 +3359,15 @@ static int __init mlxsw_core_module_init(void)
{
int err;
+ err = mlxsw_linecard_driver_register();
+ if (err)
+ return err;
+
mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
- if (!mlxsw_wq)
- return -ENOMEM;
+ if (!mlxsw_wq) {
+ err = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
@@ -3344,6 +3378,8 @@ static int __init mlxsw_core_module_init(void)
err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
+err_alloc_workqueue:
+ mlxsw_linecard_driver_unregister();
return err;
}
@@ -3351,6 +3387,7 @@ static void __exit mlxsw_core_module_exit(void)
{
destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
+ mlxsw_linecard_driver_unregister();
}
module_init(mlxsw_core_module_init);
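
The devlink_resource_* to devl_resource_* conversion moves locking to the caller: the devl_* helpers expect the devlink instance lock to be held, which __mlxsw_core_bus_device_register() now takes for the non-reload path. A simplified sketch of the resulting pattern:

        devl_lock(devlink);
        err = devl_resource_register(devlink, DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
                                     max_ports, MLXSW_CORE_RESOURCE_PORTS,
                                     DEVLINK_RESOURCE_ID_PARENT_TOP,
                                     &ports_num_params);
        if (!err)
                devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
                                               mlxsw_ports_occ_get, mlxsw_core);
        devl_unlock(devlink);

During reload the lock is already held by devlink core, hence the "if (!reload)" guards around devl_lock()/devl_unlock() above.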
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c2a891287047..02d9cc2ef0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -12,12 +12,14 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/net_namespace.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
#include "cmd.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
enum mlxsw_core_resource_id {
MLXSW_CORE_RESOURCE_PORTS = 1,
@@ -47,6 +49,11 @@ mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
@@ -261,7 +268,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
struct mlxsw_linecard *
mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
bool (*selector)(void *priv,
u16 local_port),
@@ -296,8 +302,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
+ used_ubridge:1,
used_kvd_sizes:1,
- used_kvh_xlt_cache_mode:1;
+ used_cqe_time_stamp_type:1;
u8 max_vepa_channels;
u16 max_mid;
u16 max_pgt;
@@ -316,10 +323,11 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
- u8 kvh_xlt_cache_mode;
+ u8 cqe_time_stamp_type;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -419,6 +427,7 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
+ bool sdq_supports_cqe_v2;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -429,6 +438,11 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
@@ -468,6 +482,8 @@ struct mlxsw_bus {
u8 *p_status);
u32 (*read_frc_h)(void *bus_priv);
u32 (*read_frc_l)(void *bus_priv);
+ u32 (*read_utc_sec)(void *bus_priv);
+ u32 (*read_utc_nsec)(void *bus_priv);
u8 features;
};
@@ -478,8 +494,6 @@ struct mlxsw_fw_rev {
u16 can_reset_minor;
};
-#define MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX 4
-
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
@@ -488,10 +502,7 @@ struct mlxsw_bus_info {
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
u8 low_frequency:1,
- read_frc_capable:1,
- xm_exists:1;
- u8 xm_local_ports_count;
- u8 xm_local_ports[MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX];
+ read_clock_capable:1;
};
struct mlxsw_hwmon;
@@ -547,11 +558,17 @@ enum mlxsw_devlink_param_id {
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
};
+struct mlxsw_cqe_ts {
+ u8 sec;
+ u32 nsec;
+};
+
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
struct mlxsw_rx_md_info rx_md_info;
};
+ struct mlxsw_cqe_ts cqe_ts;
};
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
@@ -567,6 +584,15 @@ enum mlxsw_linecard_status_event_type {
MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
};
+struct mlxsw_linecard_bdev;
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+ char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+};
+
struct mlxsw_linecard {
u8 slot_index;
struct mlxsw_linecards *linecards;
@@ -581,6 +607,11 @@ struct mlxsw_linecard {
active:1;
u16 hw_revision;
u16 ini_version;
+ struct mlxsw_linecard_bdev *bdev;
+ struct {
+ struct mlxsw_linecard_device_info info;
+ u8 index;
+ } device;
};
struct mlxsw_linecard_types_info;
@@ -601,6 +632,14 @@ mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
return &linecards->linecards[slot_index - 1];
}
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *bus_info);
void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
@@ -620,4 +659,10 @@ void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
struct mlxsw_linecards_event_ops *ops,
void *priv);
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard);
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard);
+
+int mlxsw_linecard_driver_register(void);
+void mlxsw_linecard_driver_unregister(void);
+
#endif
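The new mlxsw_cqe_ts carries only the low 8 bits of the UTC seconds counter plus a 30-bit nanosecond value snapshotted by the ASIC, so a consumer has to splice it with the full seconds counter obtained through the new read_utc_sec() bus callback. A minimal sketch of such a conversion, assuming a hypothetical helper (this is not the driver's actual PTP code) and that the CQE completed within the 256-second wrap period of the 8-bit field:

/* Hypothetical helper (illustration only): rebuild a nanosecond UTC
 * timestamp from the truncated CQE fields saved in the skb control block.
 */
static u64 example_utc_ns_from_cqe(struct mlxsw_core *mlxsw_core,
				   const struct mlxsw_cqe_ts *cqe_ts)
{
	u64 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
	u8 sec_low = utc_sec & 0xff;

	/* Splice the ASIC's 8-bit seconds snapshot into the full counter. */
	utc_sec = (utc_sec & ~0xffULL) | cqe_ts->sec;

	/* The CQE is in the past; a spliced value in the future means the
	 * low byte wrapped since the CQE was written.
	 */
	if (cqe_ts->sec > sec_low)
		utc_sec -= 0x100;

	return utc_sec * NSEC_PER_SEC + cqe_ts->nsec;
}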
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index fa33caecc91d..636db9a87457 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1164,7 +1164,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
*
- * The Trap with userdef action action has the same functionality as
+ * The Trap with userdef action has the same functionality as
* the Trap action with addition of user defined value that can be set
* and used by higher layer applications.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 34bec9cd572c..0107cbc32fc7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -180,7 +180,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
} else {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 0 and I2C high address is used. Please refer
- * refer to "Memory Organization" figure in SFF-8472
+ * to "Memory Organization" figure in SFF-8472
* specification for graphical depiction.
*/
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
new file mode 100644
index 000000000000..af37e650a8ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/idr.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include "core.h"
+
+#define MLXSW_LINECARD_DEV_ID_NAME "lc"
+
+struct mlxsw_linecard_dev {
+ struct mlxsw_linecard *linecard;
+};
+
+struct mlxsw_linecard_bdev {
+ struct auxiliary_device adev;
+ struct mlxsw_linecard *linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+};
+
+static DEFINE_IDA(mlxsw_linecard_bdev_ida);
+
+static int mlxsw_linecard_bdev_id_alloc(void)
+{
+ return ida_alloc(&mlxsw_linecard_bdev_ida, GFP_KERNEL);
+}
+
+static void mlxsw_linecard_bdev_id_free(int id)
+{
+ ida_free(&mlxsw_linecard_bdev_ida, id);
+}
+
+static void mlxsw_linecard_bdev_release(struct device *device)
+{
+ struct auxiliary_device *adev =
+ container_of(device, struct auxiliary_device, dev);
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+
+ mlxsw_linecard_bdev_id_free(adev->id);
+ kfree(linecard_bdev);
+}
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev;
+ int err;
+ int id;
+
+ id = mlxsw_linecard_bdev_id_alloc();
+ if (id < 0)
+ return id;
+
+ linecard_bdev = kzalloc(sizeof(*linecard_bdev), GFP_KERNEL);
+ if (!linecard_bdev) {
+ mlxsw_linecard_bdev_id_free(id);
+ return -ENOMEM;
+ }
+ linecard_bdev->adev.id = id;
+ linecard_bdev->adev.name = MLXSW_LINECARD_DEV_ID_NAME;
+ linecard_bdev->adev.dev.release = mlxsw_linecard_bdev_release;
+ linecard_bdev->adev.dev.parent = linecard->linecards->bus_info->dev;
+ linecard_bdev->linecard = linecard;
+
+ err = auxiliary_device_init(&linecard_bdev->adev);
+ if (err) {
+ mlxsw_linecard_bdev_id_free(id);
+ kfree(linecard_bdev);
+ return err;
+ }
+
+ err = auxiliary_device_add(&linecard_bdev->adev);
+ if (err) {
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ return err;
+ }
+
+ linecard->bdev = linecard_bdev;
+ return 0;
+}
+
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev = linecard->bdev;
+
+ if (!linecard_bdev)
+ /* Unprovisioned line cards do not have an auxiliary device. */
+ return;
+ auxiliary_device_delete(&linecard_bdev->adev);
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ linecard->bdev = NULL;
+}
+
+static int mlxsw_linecard_dev_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_devlink_info_get(linecard, req, extack);
+}
+
+static int
+mlxsw_linecard_dev_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_flash_update(devlink, linecard,
+ params->fw, extack);
+}
+
+static const struct devlink_ops mlxsw_linecard_dev_devlink_ops = {
+ .info_get = mlxsw_linecard_dev_devlink_info_get,
+ .flash_update = mlxsw_linecard_dev_devlink_flash_update,
+};
+
+static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
+ sizeof(*linecard_dev), &adev->dev);
+ if (!devlink)
+ return -ENOMEM;
+ linecard_dev = devlink_priv(devlink);
+ linecard_dev->linecard = linecard_bdev->linecard;
+ linecard_bdev->linecard_dev = linecard_dev;
+
+ devlink_register(devlink);
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ return 0;
+}
+
+static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
+static const struct auxiliary_device_id mlxsw_linecard_bdev_id_table[] = {
+ { .name = KBUILD_MODNAME "." MLXSW_LINECARD_DEV_ID_NAME },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlxsw_linecard_bdev_id_table);
+
+static struct auxiliary_driver mlxsw_linecard_driver = {
+ .name = MLXSW_LINECARD_DEV_ID_NAME,
+ .probe = mlxsw_linecard_bdev_probe,
+ .remove = mlxsw_linecard_bdev_remove,
+ .id_table = mlxsw_linecard_bdev_id_table,
+};
+
+int mlxsw_linecard_driver_register(void)
+{
+ return auxiliary_driver_register(&mlxsw_linecard_driver);
+}
+
+void mlxsw_linecard_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlxsw_linecard_driver);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 5c9869dcf674..ca59f0b946da 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include "core.h"
+#include "../mlxfw/mlxfw.h"
struct mlxsw_linecard_ini_file {
__le16 size;
@@ -87,6 +88,351 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
return linecard->name;
}
+struct mlxsw_linecard_device_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_linecard *linecard;
+};
+
+static int mlxsw_linecard_device_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index,
+ u32 *p_max_size,
+ u8 *p_align_bits,
+ u16 *p_max_write_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcqi_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcqi), &mcqi_pl);
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_linecard_device_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev,
+ u32 *fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data,
+ u16 size, u32 offset)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcda_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcda), &mcda_pl);
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int mlxsw_linecard_device_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE,
+ 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ u8 error_code;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_linecard_device_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static void mlxsw_linecard_device_fw_fsm_release(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_linecard_device_dev_ops = {
+ .component_query = mlxsw_linecard_device_fw_component_query,
+ .fsm_lock = mlxsw_linecard_device_fw_fsm_lock,
+ .fsm_component_update = mlxsw_linecard_device_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_linecard_device_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_linecard_device_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_linecard_device_fw_fsm_activate,
+ .fsm_query_state = mlxsw_linecard_device_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_linecard_device_fw_fsm_cancel,
+ .fsm_release = mlxsw_linecard_device_fw_fsm_release,
+};
+
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device_fw_info info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_linecard_device_dev_ops,
+ .psid = linecard->device.info.psid,
+ .psid_size = strlen(linecard->device.info.psid),
+ .devlink = linecard_devlink,
+ },
+ .mlxsw_core = mlxsw_core,
+ .linecard = linecard,
+ };
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ NL_SET_ERR_MSG_MOD(extack, "Only active line cards can be flashed");
+ err = -EINVAL;
+ goto unlock;
+ }
+ err = mlxsw_core_fw_flash(mlxsw_core, &info.mlxfw_dev,
+ firmware, extack);
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_device_psid_get(struct mlxsw_linecard *linecard,
+ u8 device_index, char *psid)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mgir_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index, device_index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mgir), &mgir_pl);
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(mgir_pl, psid);
+ return 0;
+}
+
+static int mlxsw_linecard_device_info_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ bool flashable_found = false;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ if (!flash_owner) /* We care only about flashable ones. */
+ continue;
+ if (flashable_found) {
+ dev_warn_once(linecard->linecards->bus_info->dev, "linecard %u: More flashable devices present, exposing only the first one\n",
+ linecard->slot_index);
+ return 0;
+ }
+
+ err = mlxsw_linecard_device_psid_get(linecard, device_index,
+ info.psid);
+ if (err)
+ return err;
+
+ linecard->device.info = info;
+ linecard->device.index = device_index;
+ flashable_found = true;
+ } while (msg_seq);
+
+ return 0;
+}
+
static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
{
linecard->provisioned = false;
@@ -226,12 +572,57 @@ void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (WARN_ON(!linecard->provisioned)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+ if (linecard->active) {
+ struct mlxsw_linecard_device_info *info = &linecard->device.info;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ info->psid);
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
static int
mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
u16 hw_revision, u16 ini_version)
{
struct mlxsw_linecards *linecards = linecard->linecards;
const char *type;
+ int err;
type = mlxsw_linecard_types_lookup(linecards, card_type);
mlxsw_linecard_status_event_done(linecard,
@@ -252,6 +643,14 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
linecard->provisioned = true;
linecard->hw_revision = hw_revision;
linecard->ini_version = ini_version;
+
+ err = mlxsw_linecard_bdev_add(linecard);
+ if (err) {
+ linecard->provisioned = false;
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+
devlink_linecard_provision_set(linecard->devlink_linecard, type);
return 0;
}
@@ -260,6 +659,7 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
{
mlxsw_linecard_status_event_done(linecard,
MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ mlxsw_linecard_bdev_del(linecard);
linecard->provisioned = false;
devlink_linecard_provision_clear(linecard->devlink_linecard);
}
@@ -270,6 +670,10 @@ static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
char mddc_pl[MLXSW_REG_MDDC_LEN];
int err;
+ err = mlxsw_linecard_device_info_update(linecard);
+ if (err)
+ return err;
+
mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
if (err)
@@ -885,6 +1289,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
mlxsw_core_flush_owq();
if (linecard->active)
mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
devlink_linecard_destroy(linecard->devlink_linecard);
mutex_destroy(&linecard->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9660d4cce96..d9bf584234a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -359,8 +359,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
/* Create port objects for each valid entry */
devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0 &&
- !mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
+ if (mlxsw_m->module_to_port[i] > 0) {
err = mlxsw_m_port_create(mlxsw_m,
mlxsw_m->module_to_port[i],
i);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index f91dde4df152..50527adc5b5a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -103,6 +103,8 @@ struct mlxsw_pci {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
u64 free_running_clock_offset;
+ u64 utc_sec_offset;
+ u64 utc_nsec_offset;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -456,9 +458,9 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
{
q->u.cq.v = mlxsw_pci->max_cqe_ver;
- /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs)
+ q->num < mlxsw_pci->num_sdq_cqs &&
+ !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
@@ -505,9 +507,32 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
+static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
+ ptrdiff_t off)
+{
+ return ioread32be(mlxsw_pci->hw_addr + off);
+}
+
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ if (cqe_v != MLXSW_PCI_CQE_V2)
+ return;
+
+ if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
+ return;
+
+ mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+ mlxsw_skb_cb(skb)->cqe_ts.nsec =
+ mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v,
char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -527,6 +552,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
if (unlikely(!tx_info.is_emad &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
tx_info.local_port);
skb = NULL;
@@ -647,6 +673,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
@@ -698,7 +726,7 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, ncqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
@@ -1235,6 +1263,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_ubridge) {
+ mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
+ profile->ubridge);
+ }
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
@@ -1252,12 +1285,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
}
- if (profile->used_kvh_xlt_cache_mode) {
- mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set(
- mbox, profile->kvh_xlt_cache_mode);
- }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
@@ -1268,31 +1295,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
}
- return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
-}
-
-static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_bus_info *bus_info,
- char *mbox)
-{
- int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox);
- int i;
-
- if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox))
- return 0;
-
- bus_info->xm_exists = true;
-
- if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) {
- dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n");
- return -EINVAL;
+ if (profile->used_cqe_time_stamp_type) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
+ profile->cqe_time_stamp_type);
}
- bus_info->xm_local_ports_count = count;
- for (i = 0; i < count; i++)
- bus_info->xm_local_ports[i] =
- mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox,
- i);
- return 0;
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
@@ -1306,8 +1316,7 @@ static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
return err;
mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
-
- return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox);
+ return 0;
}
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1550,6 +1559,24 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->free_running_clock_offset =
mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+ if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_sec_bar;
+ }
+
+ mlxsw_pci->utc_sec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_nsec_bar;
+ }
+
+ mlxsw_pci->utc_nsec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1582,6 +1609,14 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_config_profile;
+ /* Some resources depend on unified bridge model, which is configured
+ * as part of config_profile. Query the resources again to get correct
+ * values.
+ */
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_requery_resources;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1599,12 +1634,15 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
@@ -1819,19 +1857,33 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_h;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
+ frc_offset_h = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}
static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_l;
+
+ frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
+}
+
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}
static const struct mlxsw_bus mlxsw_pci_bus = {
@@ -1843,6 +1895,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
.read_frc_h = mlxsw_pci_read_frc_h,
.read_frc_l = mlxsw_pci_read_frc_l,
+ .read_utc_sec = mlxsw_pci_read_utc_sec,
+ .read_utc_nsec = mlxsw_pci_read_utc_nsec,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
@@ -1933,7 +1987,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
- mlxsw_pci->bus_info.read_frc_capable = true;
+ mlxsw_pci->bus_info.read_clock_capable = true;
mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
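The free-running clock and the new UTC counter are each exposed as independent 32-bit BAR reads (read_frc_h/read_frc_l and read_utc_sec/read_utc_nsec), so composing a coherent 64-bit value takes a high/low/high read sequence. A minimal sketch under that assumption, using a hypothetical helper rather than code from this patch:

/* Hypothetical helper (illustration only): compose a 64-bit free-running
 * clock value from the split 32-bit reads.
 */
static u64 example_read_frc(struct mlxsw_core *mlxsw_core)
{
	u32 frc_h1, frc_h2, frc_l;

	frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
	frc_l = mlxsw_core_read_frc_l(mlxsw_core);
	frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);

	/* If the high word advanced between the reads, the low word wrapped;
	 * fetch it again so it matches the newer high word.
	 */
	if (frc_h1 != frc_h2)
		frc_l = mlxsw_core_read_frc_l(mlxsw_core);

	return (u64)frc_h2 << 32 | frc_l;
}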
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7b531228d6c0..48dbfea0a2a1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -41,9 +41,6 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_H(offset) (offset)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_L(offset) ((offset) + 4)
-
#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
@@ -217,6 +214,25 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+/* pci_cqe_time_stamp_low
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_low, 0x0C, 16, 16);
+
#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
/* pci_cqe_mirror_tclass
@@ -280,8 +296,67 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
*/
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+enum mlxsw_pci_cqe_time_stamp_type {
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_USEC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_FRC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC,
+};
+
+/* pci_cqe_time_stamp_type
+ * Time stamp type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type)
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_type, 0x18, 22, 2);
+
#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+/* pci_cqe_time_stamp_high
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_high, 0x18, 0, 22);
+
+static inline u64 mlxsw_pci_cqe2_time_stamp_get(const char *cqe)
+{
+ u64 ts_high = mlxsw_pci_cqe2_time_stamp_high_get(cqe);
+ u64 ts_low = mlxsw_pci_cqe2_time_stamp_low_get(cqe);
+
+ return ts_high << 16 | ts_low;
+}
+
+static inline u8 mlxsw_pci_cqe2_time_stamp_sec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts >> 30 & 0xFF;
+}
+
+static inline u32 mlxsw_pci_cqe2_time_stamp_nsec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts & 0x3FFFFFFF;
+}
+
/* pci_cqe_mirror_latency
* End-to-end latency of the original packet that does mirroring to the CPU.
* Value of 0xFFFFFF means that the latency is invalid. Units are according to
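As the comments above describe, the CQEv2 UTC timestamp is a 38-bit field: bits 37:30 carry the seconds (modulo 256) and bits 29:0 carry the nanoseconds, which is exactly what the sec/nsec helpers extract. A small standalone check of that split, with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 100 seconds and 123456789 ns packed per the CQEv2 layout. */
	uint64_t ts = (100ULL << 30) | 123456789ULL;

	assert((ts >> 30 & 0xFF) == 100);	/* mirrors ..._time_stamp_sec_get() */
	assert((ts & 0x3FFFFFFF) == 123456789);	/* mirrors ..._time_stamp_nsec_get() */
	return 0;
}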
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 741fd2989d12..ac4d4ea51597 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -15,8 +15,6 @@
#define MLXSW_PORT_SWID_TYPE_IB 1
#define MLXSW_PORT_SWID_TYPE_ETH 2
-#define MLXSW_PORT_MID 0xd000
-
#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 93af6c974ece..f27bdecdf952 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -322,6 +322,18 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -335,6 +347,15 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_vid
+ * New VID when set_vid=1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when set_vid=0.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
/* reg_sfd_uc_system_port
* Unique port identifier for the final destination of the packet.
* Access: RW
@@ -359,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 fid_vid,
+ const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
@@ -368,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
+ mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
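With the new set_vid/vid fields, a unicast FDB record can now also request a VID rewrite at egress. A hedged usage sketch of the updated mlxsw_reg_sfd_uc_pack() signature; the helper name and all values are hypothetical and not taken from the driver:

/* Hypothetical example (unified bridge model): static unicast FDB record in
 * FID 100 pointing at local port 5, with the egress VID rewritten to 10.
 */
static int example_fdb_uc_add(struct mlxsw_core *mlxsw_core)
{
	char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac, 100, 10, MLXSW_REG_SFD_REC_ACTION_NOP, 5);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}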
@@ -379,6 +402,18 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_lag_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_lag_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -393,8 +428,10 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
/* reg_sfd_uc_lag_lag_vid
- * Indicates VID in case of vFIDs. Reserved for FIDs.
+ * New VLAN ID.
* Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and set_vid=0.
*/
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
MLXSW_REG_SFD_REC_LEN, 0x0C, false);
@@ -419,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
@@ -997,7 +1035,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
* to packet types used for flooding.
*/
#define MLXSW_REG_SFGC_ID 0x2011
-#define MLXSW_REG_SFGC_LEN 0x10
+#define MLXSW_REG_SFGC_LEN 0x14
MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN);
@@ -1019,9 +1057,10 @@ enum mlxsw_reg_sfgc_type {
*/
MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
-enum mlxsw_reg_sfgc_bridge_type {
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+/* bridge_type is used in SFGC and SFMR. */
+enum mlxsw_reg_bridge_type {
+ MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
+ MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
};
/* reg_sfgc_bridge_type
@@ -1054,12 +1093,6 @@ MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
*/
MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
-/* reg_sfgc_mid
- * The multicast ID for the swid. Not supported for Spectrum
- * Access: RW
- */
-MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
-
/* reg_sfgc_counter_set_type
* Counter Set Type for flow counters.
* Access: RW
@@ -1072,18 +1105,26 @@ MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
*/
MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+/* reg_sfgc_mid_base
+ * MID Base.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
+
static inline void
mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_reg_bridge_type bridge_type,
enum mlxsw_flood_table_type table_type,
- unsigned int flood_table)
+ unsigned int flood_table, u16 mid_base)
{
MLXSW_REG_ZERO(sfgc, payload);
mlxsw_reg_sfgc_type_set(payload, type);
mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfgc_table_type_set(payload, table_type);
mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
- mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+ mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
}
/* SFDF - Switch Filtering DB Flush
@@ -1516,7 +1557,7 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
* virtualized ports.
*/
#define MLXSW_REG_SVFA_ID 0x201C
-#define MLXSW_REG_SVFA_LEN 0x10
+#define MLXSW_REG_SVFA_LEN 0x18
MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN);
@@ -1537,6 +1578,7 @@ MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_VNI_TO_FID,
};
/* reg_svfa_mapping_table
@@ -1586,20 +1628,76 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
- enum mlxsw_reg_svfa_mt mt, bool valid,
- u16 fid, u16 vid)
+/* reg_svfa_vni
+ * Virtual Network Identifier.
+ * Access: Index
+ *
+ * Note: Reserved when mapping_table is not 2 (VNI mapping table).
+ */
+MLXSW_ITEM32(reg, svfa, vni, 0x10, 0, 24);
+
+/* reg_svfa_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-enabled RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, svfa, irif_v, 0x14, 24, 1);
+
+/* reg_svfa_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
+
+static inline void __mlxsw_reg_svfa_pack(char *payload,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
- local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
mlxsw_reg_svfa_swid_set(payload, 0);
- mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_irif_v_set(payload, irif_v);
+ mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
+}
+
+static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
+ bool valid, u16 fid, u16 vid,
+ bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
+ u16 vid, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
+static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
+ u32 vni, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_vni_set(payload, vni);
+}
+
/* SPVTR - Switch Port VLAN Stacking Register
* ------------------------------------------
* The Switch Port VLAN Stacking register configures the VLAN mode of the port
@@ -1741,7 +1839,7 @@ static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
* Creates and configures FIDs.
*/
#define MLXSW_REG_SFMR_ID 0x201F
-#define MLXSW_REG_SFMR_LEN 0x18
+#define MLXSW_REG_SFMR_LEN 0x30
MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN);
@@ -1764,6 +1862,28 @@ MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+/* reg_sfmr_flood_rsp
+ * Router sub-port flooding table.
+ * 0 - Regular flooding table.
+ * 1 - Router sub-port flooding table. For this FID the flooding is per
+ * router-sub-port local_port. Must not be set for a FID which is not a
+ * router-sub-port and must be set prior to enabling the relevant RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
+
+/* reg_sfmr_flood_bridge_type
+ * Flood bridge type (see SFGC.bridge_type).
+ * 0 - type_0.
+ * 1 - type_1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
+
/* reg_sfmr_fid_offset
* FID offset.
* Used to point into the flooding table selected by SFGC register if
@@ -1800,15 +1920,57 @@ MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
/* reg_sfmr_vni
* Virtual Network Identifier.
+ * When legacy bridge model is used, a given VNI can only be assigned to one
+ * FID. When unified bridge model is used, it configures only the FID->VNI,
+ * the VNI->FID is done by SVFA.
* Access: RW
- *
- * Note: A given VNI can only be assigned to one FID.
*/
MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+/* reg_sfmr_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-valid RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
+
+/* reg_sfmr_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+
+/* reg_sfmr_smpe_valid
+ * SMPE is valid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe_valid, 0x28, 20, 1);
+
+/* reg_sfmr_smpe
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
+
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset)
+ u16 fid_offset, bool flood_rsp,
+ enum mlxsw_reg_bridge_type bridge_type,
+ bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1816,6 +1978,10 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
mlxsw_reg_sfmr_vtfp_set(payload, false);
mlxsw_reg_sfmr_vv_set(payload, false);
+ mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
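The comments above note that under the unified bridge model SFMR only programs the FID->VNI direction, while the VNI->FID direction moves to the new SVFA VNI mapping table. A hedged sketch of how the two writes could pair up (hypothetical helper and values, not lifted from the driver):

/* Hypothetical example: create a .1d FID and attach a VNI to it under the
 * unified bridge model - SFMR programs FID->VNI, SVFA programs VNI->FID.
 */
static int example_fid_vni_set(struct mlxsw_core *mlxsw_core, u16 fid, u32 vni)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	char svfa_pl[MLXSW_REG_SVFA_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0,
			    false, MLXSW_REG_BRIDGE_TYPE_1, false, 0);
	mlxsw_reg_sfmr_vv_set(sfmr_pl, true);
	mlxsw_reg_sfmr_vni_set(sfmr_pl, vni);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	mlxsw_reg_svfa_vni_pack(svfa_pl, true, fid, vni, false, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);
}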
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -2013,6 +2179,45 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
+/* SMPE - Switch Multicast Port to Egress VID
+ * ------------------------------------------
+ * The switch multicast port to egress VID maps
+ * {egress_port, SMPE index} -> {VID}.
+ */
+#define MLXSW_REG_SMPE_ID 0x202B
+#define MLXSW_REG_SMPE_LEN 0x0C
+
+MLXSW_REG_DEFINE(smpe, MLXSW_REG_SMPE_ID, MLXSW_REG_SMPE_LEN);
+
+/* reg_smpe_local_port
+ * Local port number.
+ * CPU port is not supported.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, smpe, 0x00, 16, 0x00, 12);
+
+/* reg_smpe_smpe_index
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smpe, smpe_index, 0x04, 0, 16);
+
+/* reg_smpe_evid
+ * Egress VID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, smpe, evid, 0x08, 0, 12);
+
+static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
+ u16 smpe_index, u16 evid)
+{
+ MLXSW_REG_ZERO(smpe, payload);
+ mlxsw_reg_smpe_local_port_set(payload, local_port);
+ mlxsw_reg_smpe_smpe_index_set(payload, smpe_index);
+ mlxsw_reg_smpe_evid_set(payload, evid);
+}
+
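The SMPE table introduced here is the per-egress-port indirection referenced by the smpe/smpe_valid fields of SFMR above and SMID2 below: {egress_port, smpe_index} resolves to an egress VID. A minimal usage sketch of the new pack helper, with a hypothetical helper name and made-up values:

/* Hypothetical example: on local port 2, make SMPE index 7 resolve to egress
 * VID 10.
 */
static int example_smpe_set(struct mlxsw_core *mlxsw_core)
{
	char smpe_pl[MLXSW_REG_SMPE_LEN];

	mlxsw_reg_smpe_pack(smpe_pl, 2, 7, 10);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(smpe), smpe_pl);
}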
/* SFTR-V2 - Switch Flooding Table Version 2 Register
* --------------------------------------------------
* The switch flooding table is used for flooding packet replication. The table
@@ -2107,6 +2312,23 @@ MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+/* reg_smid2_smpe_valid
+ * SMPE is valid.
+ * When not valid, the egress VID will not be modified by the SMPE table.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe_valid, 0x08, 20, 1);
+
+/* reg_smid2_smpe
+ * Switch multicast port to egress VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe, 0x08, 0, 16);
+
/* reg_smid2_port
 * Local port membership (1 bit per port).
* Access: RW
@@ -2120,13 +2342,15 @@ MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
- bool set)
+ bool set, bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(smid2, payload);
mlxsw_reg_smid2_swid_set(payload, 0);
mlxsw_reg_smid2_mid_set(payload, mid);
mlxsw_reg_smid2_port_set(payload, port, set);
mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+ mlxsw_reg_smid2_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_smid2_smpe_set(payload, smpe_valid ? smpe : 0);
}
/* CWTP - Congestion WRED ECN TClass Profile
@@ -6701,31 +6925,32 @@ MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
/* VLAN Interface */
-/* reg_ritr_vlan_if_vid
+/* reg_ritr_vlan_if_vlan_id
* VLAN ID.
* Access: RW
*/
-MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+MLXSW_ITEM32(reg, ritr, vlan_if_vlan_id, 0x08, 0, 12);
+
+/* reg_ritr_vlan_if_efid
+ * Egress FID.
+ * Used to connect the RIF to a bridge.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-1.
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_efid, 0x0C, 0, 16);
/* FID Interface */
/* reg_ritr_fid_if_fid
- * Filtering ID. Used to connect a bridge to the router. Only FIDs from
- * the vFID range are supported.
+ * Filtering ID. Used to connect a bridge to the router.
+ * When legacy bridge model is used, only FIDs from the vFID range are
+ * supported. When unified bridge model is used, this is the egress FID for
+ * router to bridge.
* Access: RW
*/
MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
-static inline void mlxsw_reg_ritr_fid_set(char *payload,
- enum mlxsw_reg_ritr_if_type rif_type,
- u16 fid)
-{
- if (rif_type == MLXSW_REG_RITR_FID_IF)
- mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
- else
- mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
-}
-
/* Sub-port Interface */
/* reg_ritr_sp_if_lag
@@ -6742,6 +6967,16 @@ MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
*/
MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+/* reg_ritr_sp_if_efid
+ * Egress filtering ID.
+ * Used to connect the eRIF to a bridge if eRIF-ACL has modified the DMAC or
+ * the VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_efid, 0x0C, 0, 16);
+
/* reg_ritr_sp_if_vid
* VLAN ID.
* Access: RW
@@ -6881,10 +7116,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
- u16 system_port, u16 vid)
+ u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
@@ -6918,6 +7154,20 @@ static inline void mlxsw_reg_ritr_mac_pack(char *payload, const char *mac)
}
static inline void
+mlxsw_reg_ritr_vlan_if_pack(char *payload, bool enable, u16 rif, u16 vr_id,
+ u16 mtu, const char *mac, u8 mac_profile_id,
+ u16 vlan_id, u16 efid)
+{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_VLAN_IF;
+
+ mlxsw_reg_ritr_pack(payload, enable, type, rif, vr_id, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(payload, mac_profile_id);
+ mlxsw_reg_ritr_vlan_if_vlan_id_set(payload, vlan_id);
+ mlxsw_reg_ritr_vlan_if_efid_set(payload, efid);
+}
+
+static inline void
mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
@@ -7848,11 +8098,10 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 *dip)
+ u32 dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip4_set(payload, *dip);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7862,8 +8111,7 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
static inline void
@@ -8926,656 +9174,62 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
-/* RXLTE - Router XLT Enable Register
- * ----------------------------------
- * The RXLTE enables XLT (eXtended Lookup Table) LPM lookups if a capable
- * XM is present on the system.
- */
-
-#define MLXSW_REG_RXLTE_ID 0x8050
-#define MLXSW_REG_RXLTE_LEN 0x0C
-
-MLXSW_REG_DEFINE(rxlte, MLXSW_REG_RXLTE_ID, MLXSW_REG_RXLTE_LEN);
-
-/* reg_rxlte_virtual_router
- * Virtual router ID associated with the router interface.
- * Range is 0..cap_max_virtual_routers-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, virtual_router, 0x00, 0, 16);
-
-enum mlxsw_reg_rxlte_protocol {
- MLXSW_REG_RXLTE_PROTOCOL_IPV4,
- MLXSW_REG_RXLTE_PROTOCOL_IPV6,
-};
-
-/* reg_rxlte_protocol
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, protocol, 0x04, 0, 4);
-
-/* reg_rxlte_lpm_xlt_en
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxlte, lpm_xlt_en, 0x08, 0, 1);
-
-static inline void mlxsw_reg_rxlte_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_rxlte_protocol protocol,
- bool lpm_xlt_en)
-{
- MLXSW_REG_ZERO(rxlte, payload);
- mlxsw_reg_rxlte_virtual_router_set(payload, virtual_router);
- mlxsw_reg_rxlte_protocol_set(payload, protocol);
- mlxsw_reg_rxlte_lpm_xlt_en_set(payload, lpm_xlt_en);
-}
-
-/* RXLTM - Router XLT M select Register
- * ------------------------------------
- * The RXLTM configures and selects the M for the XM lookups.
- */
-
-#define MLXSW_REG_RXLTM_ID 0x8051
-#define MLXSW_REG_RXLTM_LEN 0x14
-
-MLXSW_REG_DEFINE(rxltm, MLXSW_REG_RXLTM_ID, MLXSW_REG_RXLTM_LEN);
-
-/* reg_rxltm_m0_val_v6
- * Global M0 value For IPv6.
- * Range 0..128
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v6, 0x10, 16, 8);
-
-/* reg_rxltm_m0_val_v4
- * Global M0 value For IPv4.
- * Range 0..32
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v4, 0x10, 0, 6);
-
-static inline void mlxsw_reg_rxltm_pack(char *payload, u8 m0_val_v4, u8 m0_val_v6)
-{
- MLXSW_REG_ZERO(rxltm, payload);
- mlxsw_reg_rxltm_m0_val_v6_set(payload, m0_val_v6);
- mlxsw_reg_rxltm_m0_val_v4_set(payload, m0_val_v4);
-}
-
-/* RLCMLD - Router LPM Cache ML Delete Register
- * --------------------------------------------
- * The RLCMLD register is used to bulk delete the XLT-LPM cache ML entries.
- * This can be used by SW when L is increased or decreased, thus need to
- * remove entries with old ML values.
- */
-
-#define MLXSW_REG_RLCMLD_ID 0x8055
-#define MLXSW_REG_RLCMLD_LEN 0x30
-
-MLXSW_REG_DEFINE(rlcmld, MLXSW_REG_RLCMLD_ID, MLXSW_REG_RLCMLD_LEN);
-
-enum mlxsw_reg_rlcmld_select {
- MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES,
-};
-
-/* reg_rlcmld_select
- * Which entries to delete.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, select, 0x00, 16, 2);
-
-enum mlxsw_reg_rlcmld_filter_fields {
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL = 0x04,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER = 0x08,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP = 0x10,
-};
-
-/* reg_rlcmld_filter_fields
- * If a bit is '0' then the relevant field is ignored.
- * Access: Index
+/* REIV - Router Egress Interface to VID Register
+ * ----------------------------------------------
+ * The REIV register maps {eRIF, egress_port} -> VID.
+ * This mapping is done at the egress, after the ACLs.
+ * This mapping always takes effect after the router, regardless of the cast
+ * type (unicast / multicast / port-based multicast), regardless of eRIF type
+ * and regardless of bridge decisions (e.g. SFD for unicast or SMPE).
+ * Reserved when the RIF is a loopback RIF.
+ *
+ * Note: Reserved when legacy bridge model is used.
*/
-MLXSW_ITEM32(reg, rlcmld, filter_fields, 0x00, 0, 8);
+#define MLXSW_REG_REIV_ID 0x8034
+#define MLXSW_REG_REIV_BASE_LEN 0x20 /* base length, without records */
+#define MLXSW_REG_REIV_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_REIV_REC_MAX_COUNT 256 /* firmware limitation */
+#define MLXSW_REG_REIV_LEN (MLXSW_REG_REIV_BASE_LEN + \
+ MLXSW_REG_REIV_REC_LEN * \
+ MLXSW_REG_REIV_REC_MAX_COUNT)
-enum mlxsw_reg_rlcmld_protocol {
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
-};
+MLXSW_REG_DEFINE(reiv, MLXSW_REG_REIV_ID, MLXSW_REG_REIV_LEN);
-/* reg_rlcmld_protocol
+/* reg_reiv_port_page
+ * Port page - elport_record[0] is 256*port_page.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, protocol, 0x08, 0, 4);
+MLXSW_ITEM32(reg, reiv, port_page, 0x00, 0, 4);
-/* reg_rlcmld_virtual_router
- * Virtual router ID.
- * Range is 0..cap_max_virtual_routers-1
+/* reg_reiv_erif
+ * Egress RIF.
+ * Range is 0..cap_max_router_interfaces-1.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, virtual_router, 0x0C, 0, 16);
-
-/* reg_rlcmld_dip
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip6, 0x10, 16);
-
-/* reg_rlcmld_dip_mask
- * per bit:
- * 0: no match
- * 1: match
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip_mask4, 0x2C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip_mask6, 0x20, 16);
-
-static inline void __mlxsw_reg_rlcmld_pack(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- enum mlxsw_reg_rlcmld_protocol protocol,
- u16 virtual_router)
-{
- u8 filter_fields = MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP;
-
- MLXSW_REG_ZERO(rlcmld, payload);
- mlxsw_reg_rlcmld_select_set(payload, select);
- mlxsw_reg_rlcmld_filter_fields_set(payload, filter_fields);
- mlxsw_reg_rlcmld_protocol_set(payload, protocol);
- mlxsw_reg_rlcmld_virtual_router_set(payload, virtual_router);
-}
-
-static inline void mlxsw_reg_rlcmld_pack4(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- u32 dip, u32 dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- virtual_router);
- mlxsw_reg_rlcmld_dip4_set(payload, dip);
- mlxsw_reg_rlcmld_dip_mask4_set(payload, dip_mask);
-}
-
-static inline void mlxsw_reg_rlcmld_pack6(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- const void *dip, const void *dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
- virtual_router);
- mlxsw_reg_rlcmld_dip6_memcpy_to(payload, dip);
- mlxsw_reg_rlcmld_dip_mask6_memcpy_to(payload, dip_mask);
-}
-
-/* RLPMCE - Router LPM Cache Enable Register
- * -----------------------------------------
- * Allows disabling the LPM cache. Can be changed on the fly.
- */
-
-#define MLXSW_REG_RLPMCE_ID 0x8056
-#define MLXSW_REG_RLPMCE_LEN 0x4
-
-MLXSW_REG_DEFINE(rlpmce, MLXSW_REG_RLPMCE_ID, MLXSW_REG_RLPMCE_LEN);
-
-/* reg_rlpmce_flush
- * Flush:
- * 0: do not flush the cache (default)
- * 1: flush (clear) the cache
- * Access: WO
- */
-MLXSW_ITEM32(reg, rlpmce, flush, 0x00, 4, 1);
-
-/* reg_rlpmce_disable
- * LPM cache:
- * 0: enabled (default)
- * 1: disabled
- * Access: RW
- */
-MLXSW_ITEM32(reg, rlpmce, disable, 0x00, 0, 1);
-
-static inline void mlxsw_reg_rlpmce_pack(char *payload, bool flush,
- bool disable)
-{
- MLXSW_REG_ZERO(rlpmce, payload);
- mlxsw_reg_rlpmce_flush_set(payload, flush);
- mlxsw_reg_rlpmce_disable_set(payload, disable);
-}
-
-/* Note that XLTQ, XMDR, XRMT and XRALXX register positions violate the rule
- * of ordering register definitions by the ID. However, XRALXX pack helpers are
- * using RALXX pack helpers, RALXX registers have higher IDs.
- * Also XMDR is using RALUE enums. XLRQ and XRMT are just put alongside with the
- * related registers.
- */
-
-/* XLTQ - XM Lookup Table Query Register
- * -------------------------------------
- */
-#define MLXSW_REG_XLTQ_ID 0x7802
-#define MLXSW_REG_XLTQ_LEN 0x2C
-
-MLXSW_REG_DEFINE(xltq, MLXSW_REG_XLTQ_ID, MLXSW_REG_XLTQ_LEN);
-
-enum mlxsw_reg_xltq_xm_device_id {
- MLXSW_REG_XLTQ_XM_DEVICE_ID_UNKNOWN,
- MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT = 0xCF71,
-};
-
-/* reg_xltq_xm_device_id
- * XM device ID.
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xm_device_id, 0x04, 0, 16);
-
-/* reg_xltq_xlt_cap_ipv4_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv4_lpm, 0x10, 0, 1);
-
-/* reg_xltq_xlt_cap_ipv6_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv6_lpm, 0x10, 1, 1);
-
-/* reg_xltq_cap_xlt_entries
- * Number of XLT entries
- * Note: SW must not fill more than 80% in order to avoid overflow
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_entries, 0x20, 0, 32);
-
-/* reg_xltq_cap_xlt_mtable
- * XLT M-Table max size
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_mtable, 0x24, 0, 32);
+MLXSW_ITEM32(reg, reiv, erif, 0x04, 0, 16);
-static inline void mlxsw_reg_xltq_pack(char *payload)
-{
- MLXSW_REG_ZERO(xltq, payload);
-}
-
-static inline void mlxsw_reg_xltq_unpack(char *payload, u16 *xm_device_id, bool *xlt_cap_ipv4_lpm,
- bool *xlt_cap_ipv6_lpm, u32 *cap_xlt_entries,
- u32 *cap_xlt_mtable)
-{
- *xm_device_id = mlxsw_reg_xltq_xm_device_id_get(payload);
- *xlt_cap_ipv4_lpm = mlxsw_reg_xltq_xlt_cap_ipv4_lpm_get(payload);
- *xlt_cap_ipv6_lpm = mlxsw_reg_xltq_xlt_cap_ipv6_lpm_get(payload);
- *cap_xlt_entries = mlxsw_reg_xltq_cap_xlt_entries_get(payload);
- *cap_xlt_mtable = mlxsw_reg_xltq_cap_xlt_mtable_get(payload);
-}
-
-/* XMDR - XM Direct Register
- * -------------------------
- * The XMDR allows direct access to the XM device via the switch.
- * Working in synchronous mode. FW waits for response from the XLT
- * for each command. FW acks the XMDR accordingly.
- */
-#define MLXSW_REG_XMDR_ID 0x7803
-#define MLXSW_REG_XMDR_BASE_LEN 0x20
-#define MLXSW_REG_XMDR_TRANS_LEN 0x80
-#define MLXSW_REG_XMDR_LEN (MLXSW_REG_XMDR_BASE_LEN + \
- MLXSW_REG_XMDR_TRANS_LEN)
-
-MLXSW_REG_DEFINE(xmdr, MLXSW_REG_XMDR_ID, MLXSW_REG_XMDR_LEN);
-
-/* reg_xmdr_bulk_entry
- * Bulk_entry
- * 0: Last entry - immediate flush of XRT-cache
- * 1: Bulk entry - do not flush the XRT-cache
+/* reg_reiv_rec_update
+ * Update enable (when write):
+ * 0 - Do not update the entry.
+ * 1 - Update the entry.
* Access: OP
*/
-MLXSW_ITEM32(reg, xmdr, bulk_entry, 0x04, 8, 1);
-
-/* reg_xmdr_num_rec
- * Number of records for Direct access to XM
- * Supported: 0..4 commands (except NOP which is a filler)
- * 0 commands is reserved when bulk_entry = 1.
- * 0 commands is allowed when bulk_entry = 0 for immediate XRT-cache flush.
- * Access: OP
- */
-MLXSW_ITEM32(reg, xmdr, num_rec, 0x04, 0, 4);
-
-/* reg_xmdr_reply_vect
- * Reply Vector
- * Bit i for command index i+1
- * values per bit:
- * 0: failed
- * 1: succeeded
- * e.g. if commands 1, 2, 4 succeeded and command 3 failed then binary
- * value will be 0b1011
- * Access: RO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, xmdr, reply_vect, 0x08, 4, 1);
-
-static inline void mlxsw_reg_xmdr_pack(char *payload, bool bulk_entry)
-{
- MLXSW_REG_ZERO(xmdr, payload);
- mlxsw_reg_xmdr_bulk_entry_set(payload, bulk_entry);
-}
-
-enum mlxsw_reg_xmdr_c_cmd_id {
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4 = 0x30,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6 = 0x31,
-};
-
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN 32
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN 48
-
-/* reg_xmdr_c_cmd_id
- */
-MLXSW_ITEM32(reg, xmdr_c, cmd_id, 0x00, 24, 8);
-
-/* reg_xmdr_c_seq_number
- */
-MLXSW_ITEM32(reg, xmdr_c, seq_number, 0x00, 12, 12);
-
-enum mlxsw_reg_xmdr_c_ltr_op {
- /* Activity is set */
- MLXSW_REG_XMDR_C_LTR_OP_WRITE = 0,
- /* There is no update mask. All fields are updated. */
- MLXSW_REG_XMDR_C_LTR_OP_UPDATE = 1,
- MLXSW_REG_XMDR_C_LTR_OP_DELETE = 2,
-};
-
-/* reg_xmdr_c_ltr_op
- * Operation.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_op, 0x04, 24, 8);
-
-/* reg_xmdr_c_ltr_trap_action
- * Trap action.
- * Values are defined in enum mlxsw_reg_ralue_trap_action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_action, 0x04, 20, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_trap_id_num {
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS0,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS1,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS2,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS3,
-};
-
-/* reg_xmdr_c_ltr_trap_id_num
- * Trap-ID number.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_id_num, 0x04, 16, 4);
-
-/* reg_xmdr_c_ltr_virtual_router
- * Virtual Router ID.
- * Range is 0..cap_max_virtual_routers-1
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_virtual_router, 0x04, 0, 16);
-
-/* reg_xmdr_c_ltr_prefix_len
- * Number of bits in the prefix of the LPM route.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_prefix_len, 0x08, 24, 8);
-
-/* reg_xmdr_c_ltr_bmp_len
- * The best match prefix length in the case that there is no match for
- * longer prefixes.
- * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_bmp_len, 0x08, 16, 8);
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_update, MLXSW_REG_REIV_BASE_LEN, 31, 1,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-/* reg_xmdr_c_ltr_entry_type
- * Entry type.
- * Values are defined in enum mlxsw_reg_ralue_entry_type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_entry_type, 0x08, 4, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_action_type {
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME,
-};
-
-/* reg_xmdr_c_ltr_action_type
- * Action Type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_action_type, 0x08, 0, 4);
-
-/* reg_xmdr_c_ltr_erif
- * Egress Router Interface.
- * Only relevant in case of LOCAL action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_erif, 0x10, 0, 16);
-
-/* reg_xmdr_c_ltr_adjacency_index
- * Points to the first entry of the group-based ECMP.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_adjacency_index, 0x10, 0, 24);
-
-#define MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC 0xFFFFFF
-
-/* reg_xmdr_c_ltr_pointer_to_tunnel
- * Only relevant in case of IP2ME action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_pointer_to_tunnel, 0x10, 0, 24);
-
-/* reg_xmdr_c_ltr_ecmp_size
- * Amount of sequential entries starting
- * from the adjacency_index (the number of ECMPs).
- * The valid range is 1-64, 512, 1024, 2048 and 4096.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_ecmp_size, 0x14, 0, 32);
-
-/* reg_xmdr_c_ltr_dip*
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * The least significant bits must be '0' if the prefix_len is smaller
- * than 128 for IPv6 or smaller than 32 for IPv4.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, xmdr_c, ltr_dip6, 0x1C, 16);
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_xmdr_c_cmd_id cmd_id, u16 seq_number,
- enum mlxsw_reg_xmdr_c_ltr_op op, u16 virtual_router,
- u8 prefix_len)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
- u8 num_rec = mlxsw_reg_xmdr_num_rec_get(xmdr_payload);
-
- mlxsw_reg_xmdr_num_rec_set(xmdr_payload, num_rec + 1);
-
- mlxsw_reg_xmdr_c_cmd_id_set(payload, cmd_id);
- mlxsw_reg_xmdr_c_seq_number_set(payload, seq_number);
- mlxsw_reg_xmdr_c_ltr_op_set(payload, op);
- mlxsw_reg_xmdr_c_ltr_virtual_router_set(payload, virtual_router);
- mlxsw_reg_xmdr_c_ltr_prefix_len_set(payload, prefix_len);
- mlxsw_reg_xmdr_c_ltr_entry_type_set(payload,
- MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
- mlxsw_reg_xmdr_c_ltr_bmp_len_set(payload, prefix_len);
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack4(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, u32 *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip4_set(payload, *dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN;
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack6(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, const void *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip6_memcpy_to(payload, dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN;
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_remote_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num,
- u32 adjacency_index, u16 ecmp_size)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_adjacency_index_set(payload, adjacency_index);
- mlxsw_reg_xmdr_c_ltr_ecmp_size_set(payload, ecmp_size);
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_local_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num, u16 erif)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_erif_set(payload, erif);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(char *xmdr_payload,
- unsigned int trans_offset)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload,
- MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(char *xmdr_payload,
- unsigned int trans_offset,
- u32 pointer_to_tunnel)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload, pointer_to_tunnel);
-}
-
-/* XRMT - XM Router M Table Register
- * ---------------------------------
- * The XRMT configures the M-Table for the XLT-LPM.
- */
-#define MLXSW_REG_XRMT_ID 0x7810
-#define MLXSW_REG_XRMT_LEN 0x14
-
-MLXSW_REG_DEFINE(xrmt, MLXSW_REG_XRMT_ID, MLXSW_REG_XRMT_LEN);
-
-/* reg_xrmt_index
- * Index in M-Table.
- * Range 0..cap_xlt_mtable-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, xrmt, index, 0x04, 0, 20);
-
-/* reg_xrmt_l0_val
+/* reg_reiv_rec_evid
+ * Egress VID.
+ * Range is 0..4095.
* Access: RW
*/
-MLXSW_ITEM32(reg, xrmt, l0_val, 0x10, 24, 8);
-
-static inline void mlxsw_reg_xrmt_pack(char *payload, u32 index, u8 l0_val)
-{
- MLXSW_REG_ZERO(xrmt, payload);
- mlxsw_reg_xrmt_index_set(payload, index);
- mlxsw_reg_xrmt_l0_val_set(payload, l0_val);
-}
-
-/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
- * -----------------------------------------------------------
- * The XRALTA is used to allocate the XLT LPM trees.
- *
- * This register embeds original RALTA register.
- */
-#define MLXSW_REG_XRALTA_ID 0x7811
-#define MLXSW_REG_XRALTA_LEN 0x08
-#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_evid, MLXSW_REG_REIV_BASE_LEN, 0, 12,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
-
-static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
+static inline void mlxsw_reg_reiv_pack(char *payload, u8 port_page, u16 erif)
{
- char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
-
- MLXSW_REG_ZERO(xralta, payload);
- mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
-}
-
-/* XRALST - XM Router Algorithmic LPM Structure Tree Register
- * ----------------------------------------------------------
- * The XRALST is used to set and query the structure of an XLT LPM tree.
- *
- * This register embeds original RALST register.
- */
-#define MLXSW_REG_XRALST_ID 0x7812
-#define MLXSW_REG_XRALST_LEN 0x108
-#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
-
-static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- MLXSW_REG_ZERO(xralst, payload);
- mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
-}
-
-static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
- u8 left_child_bin,
- u8 right_child_bin)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
- right_child_bin);
-}
-
-/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
- * --------------------------------------------------------
- * The XRALTB register is used to bind virtual router and protocol
- * to an allocated LPM tree.
- *
- * This register embeds original RALTB register.
- */
-#define MLXSW_REG_XRALTB_ID 0x7813
-#define MLXSW_REG_XRALTB_LEN 0x08
-#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
-
-static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
-{
- char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
-
- MLXSW_REG_ZERO(xraltb, payload);
- mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+ MLXSW_REG_ZERO(reiv, payload);
+ mlxsw_reg_reiv_port_page_set(payload, port_page);
+ mlxsw_reg_reiv_erif_set(payload, erif);
}
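As a rough usage sketch (not part of this patch), a FID code path could program the {eRIF, port} -> VID mapping for the first port page along these lines; rif_index, vid and mlxsw_core are assumed, and the payload is heap-allocated because the register is over 1 KiB.

	char *reiv_pl;
	int i, err;

	reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
	if (!reiv_pl)
		return -ENOMEM;

	mlxsw_reg_reiv_pack(reiv_pl, 0, rif_index);	/* port page 0: ports 0..255 */
	for (i = 0; i < MLXSW_REG_REIV_REC_MAX_COUNT; i++) {
		/* Only records with the update bit set are modified. */
		mlxsw_reg_reiv_rec_update_set(reiv_pl, i, true);
		mlxsw_reg_reiv_rec_evid_set(reiv_pl, i, vid);
	}
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(reiv), reiv_pl);
	kfree(reiv_pl);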
/* MFCR - Management Fan Control Register
@@ -10693,6 +10347,8 @@ MLXSW_REG_DEFINE(mtutc, MLXSW_REG_MTUTC_ID, MLXSW_REG_MTUTC_LEN);
enum mlxsw_reg_mtutc_operation {
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC = 0,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 1,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME = 2,
MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ = 3,
};
@@ -10705,25 +10361,50 @@ MLXSW_ITEM32(reg, mtutc, operation, 0x00, 0, 4);
/* reg_mtutc_freq_adjustment
* Frequency adjustment: Every PPS the HW frequency will be
* adjusted by this value. Units of HW clock, where HW counts
- * 10^9 HW clocks for 1 HW second.
+ * 10^9 HW clocks for 1 HW second. Range is from -50,000,000 to +50,000,000.
+ * In Spectrum-2, the field is reversed: positive values decrease the
+ * frequency.
* Access: RW
*/
MLXSW_ITEM32(reg, mtutc, freq_adjustment, 0x04, 0, 32);
+#define MLXSW_REG_MTUTC_MAX_FREQ_ADJ (50 * 1000 * 1000)
+
/* reg_mtutc_utc_sec
* UTC seconds.
* Access: WO
*/
MLXSW_ITEM32(reg, mtutc, utc_sec, 0x10, 0, 32);
+/* reg_mtutc_utc_nsec
+ * UTC nSecs.
+ * Range 0..(10^9-1)
+ * Updated when operation is SET_TIME_IMMEDIATE.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_nsec, 0x14, 0, 30);
+
+/* reg_mtutc_time_adjustment
+ * Time adjustment.
+ * Units of nSec.
+ * Range is from -32768 to +32767.
+ * Updated when operation is ADJUST_TIME.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, time_adjustment, 0x18, 0, 32);
+
static inline void
mlxsw_reg_mtutc_pack(char *payload, enum mlxsw_reg_mtutc_operation oper,
- u32 freq_adj, u32 utc_sec)
+ u32 freq_adj, u32 utc_sec, u32 utc_nsec, u32 time_adj)
{
MLXSW_REG_ZERO(mtutc, payload);
mlxsw_reg_mtutc_operation_set(payload, oper);
mlxsw_reg_mtutc_freq_adjustment_set(payload, freq_adj);
mlxsw_reg_mtutc_utc_sec_set(payload, utc_sec);
+ mlxsw_reg_mtutc_utc_nsec_set(payload, utc_nsec);
+ mlxsw_reg_mtutc_time_adjustment_set(payload, time_adj);
}
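For example, a PTP clock op implementing a small phase correction could use the new ADJUST_TIME operation roughly like this (a hedged sketch; nsec_delta must fit the -32768..32767 ns range and mlxsw_core is assumed):

	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	/* Only the time_adjustment field is meaningful for ADJUST_TIME. */
	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
			     0, 0, 0, nsec_delta);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);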
/* MCQI - Management Component Query Information
@@ -11391,15 +11072,76 @@ MLXSW_ITEM32(reg, mtptpt, trap_id, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, mtptpt, message_type, 0x04, 0, 16);
-static inline void mlxsw_reg_mtptptp_pack(char *payload,
- enum mlxsw_reg_mtptpt_trap_id trap_id,
- u16 message_type)
+static inline void mlxsw_reg_mtptpt_pack(char *payload,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
{
MLXSW_REG_ZERO(mtptpt, payload);
mlxsw_reg_mtptpt_trap_id_set(payload, trap_id);
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MTPCPC - Monitoring Time Precision Correction Port Configuration Register
+ * -------------------------------------------------------------------------
+ */
+#define MLXSW_REG_MTPCPC_ID 0x9093
+#define MLXSW_REG_MTPCPC_LEN 0x2C
+
+MLXSW_REG_DEFINE(mtpcpc, MLXSW_REG_MTPCPC_ID, MLXSW_REG_MTPCPC_LEN);
+
+/* reg_mtpcpc_pport
+ * Per port:
+ * 0: config is global. When reading, the local_port is 1.
+ * 1: config is per port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpcpc, pport, 0x00, 31, 1);
+
+/* reg_mtpcpc_local_port
+ * Local port number.
+ * Supported to/from CPU port.
+ * Reserved when pport = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mtpcpc, 0x00, 16, 0x00, 12);
+
+/* reg_mtpcpc_ptp_trap_en
+ * Enable PTP traps.
+ * The trap_id is configured by MTPTPT.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ptp_trap_en, 0x04, 0, 1);
+
+/* reg_mtpcpc_ing_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at ingress.
+ * MessageType field as defined by IEEE 1588 Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Supported also from CPU port.
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ing_correction_message_type, 0x10, 0, 16);
+
+/* reg_mtpcpc_egr_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at egress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a
+ * messageType value (e.g. Bit0: Sync, Bit1: Delay_Req). Also supported
+ * from the CPU port.
+ * Default is all 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, egr_correction_message_type, 0x14, 0, 16);
+
+static inline void mlxsw_reg_mtpcpc_pack(char *payload, bool pport,
+ u16 local_port, bool ptp_trap_en,
+ u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpcpc, payload);
+ mlxsw_reg_mtpcpc_pport_set(payload, pport);
+ mlxsw_reg_mtpcpc_local_port_set(payload, pport ? local_port : 0);
+ mlxsw_reg_mtpcpc_ptp_trap_en_set(payload, ptp_trap_en);
+ mlxsw_reg_mtpcpc_ing_correction_message_type_set(payload, ing);
+ mlxsw_reg_mtpcpc_egr_correction_message_type_set(payload, egr);
+}
+
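As a hedged illustration, a global (all-ports) configuration that traps PTP packets and updates the correction field for Sync and Delay_Req in both directions might look like this; PTP_MSGTYPE_SYNC and PTP_MSGTYPE_DELAY_REQ come from <linux/ptp_classify.h> and mlxsw_core is assumed.

	char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
	u16 msg_types = BIT(PTP_MSGTYPE_SYNC) | BIT(PTP_MSGTYPE_DELAY_REQ);

	/* pport = false: configuration is global, so local_port is ignored. */
	mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, true, msg_types, msg_types);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpcpc), mtpcpc_pl);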
/* MFGD - Monitoring FW General Debug Register
* -------------------------------------------
*/
@@ -11622,6 +11364,95 @@ mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
*p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
}
+/* MDDT - Management DownStream Device Tunneling Register
+ * ------------------------------------------------------
+ * This register is used to deliver query and request messages (PRM registers,
+ * commands) to a DownStream device.
+ */
+#define MLXSW_REG_MDDT_ID 0x9160
+#define MLXSW_REG_MDDT_LEN 0x110
+
+MLXSW_REG_DEFINE(mddt, MLXSW_REG_MDDT_ID, MLXSW_REG_MDDT_LEN);
+
+/* reg_mddt_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, slot_index, 0x00, 8, 4);
+
+/* reg_mddt_device_index
+ * Device index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, device_index, 0x00, 0, 8);
+
+/* reg_mddt_read_size
+ * Read size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, read_size, 0x04, 24, 8);
+
+/* reg_mddt_write_size
+ * Write size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, write_size, 0x04, 16, 8);
+
+enum mlxsw_reg_mddt_status {
+ MLXSW_REG_MDDT_STATUS_OK,
+};
+
+/* reg_mddt_status
+ * Return code of the Downstream Device to the register that was sent.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddt, status, 0x0C, 24, 8);
+
+enum mlxsw_reg_mddt_method {
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+};
+
+/* reg_mddt_method
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, method, 0x0C, 22, 2);
+
+/* reg_mddt_register_id
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, register_id, 0x0C, 0, 16);
+
+#define MLXSW_REG_MDDT_PAYLOAD_OFFSET 0x0C
+#define MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN 4
+
+static inline char *mlxsw_reg_mddt_inner_payload(char *payload)
+{
+ return payload + MLXSW_REG_MDDT_PAYLOAD_OFFSET +
+ MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+}
+
+static inline void mlxsw_reg_mddt_pack(char *payload, u8 slot_index,
+ u8 device_index,
+ enum mlxsw_reg_mddt_method method,
+ const struct mlxsw_reg_info *reg,
+ char **inner_payload)
+{
+ int len = reg->len + MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+
+ if (WARN_ON(len + MLXSW_REG_MDDT_PAYLOAD_OFFSET > MLXSW_REG_MDDT_LEN))
+ len = MLXSW_REG_MDDT_LEN - MLXSW_REG_MDDT_PAYLOAD_OFFSET;
+
+ MLXSW_REG_ZERO(mddt, payload);
+ mlxsw_reg_mddt_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddt_device_index_set(payload, device_index);
+ mlxsw_reg_mddt_method_set(payload, method);
+ mlxsw_reg_mddt_register_id_set(payload, reg->id);
+ mlxsw_reg_mddt_read_size_set(payload, len / 4);
+ mlxsw_reg_mddt_write_size_set(payload, len / 4);
+ *inner_payload = mlxsw_reg_mddt_inner_payload(payload);
+}
+
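A hypothetical caller tunneling a register to a downstream device could use the helper along these lines; which register is tunneled is entirely up to the caller, and PMLP, slot 2, device 1 and local_port below are purely illustrative.

	char mddt_pl[MLXSW_REG_MDDT_LEN];
	char *inner_pl;
	int err;

	mlxsw_reg_mddt_pack(mddt_pl, 2, 1, MLXSW_REG_MDDT_METHOD_QUERY,
			    MLXSW_REG(pmlp), &inner_pl);
	/* Pack the tunneled register into the inner payload as usual. */
	mlxsw_reg_pmlp_pack(inner_pl, local_port);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
	/* On success, unpack the tunneled register from inner_pl. */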
/* MDDQ - Management DownStream Device Query Register
* --------------------------------------------------
* This register allows to query the DownStream device properties. The desired
@@ -11643,7 +11474,11 @@ MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
enum mlxsw_reg_mddq_query_type {
MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
- MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
};
/* reg_mddq_query_type
@@ -11657,6 +11492,28 @@ MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a specific request, the response
+ * carries the sequential number of the following message; the last message
+ * carries 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
/* reg_mddq_slot_info_provisioned
* If set, the INI file is applied and the card is provisioned.
* Access: RO
@@ -11743,6 +11600,61 @@ mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
*p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
}
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
+
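Because firmware returns one device record per query, a caller would typically loop until the response sequence number drops back to zero, roughly as sketched below (slot_index and mlxsw_core are assumed; error handling trimmed):

	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	bool data_valid, flash_owner;
	u8 msg_seq = 0, device_index;
	u16 fw_major, fw_minor, fw_sub_minor;

	do {
		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, msg_seq);
		if (mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl))
			break;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
						  &data_valid, &flash_owner,
						  &device_index, &fw_major,
						  &fw_minor, &fw_sub_minor);
		if (!data_valid)
			break;
		/* ... consume one device record per iteration ... */
	} while (msg_seq);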
#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
/* reg_mddq_slot_ascii_name
@@ -13011,6 +12923,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
+ MLXSW_REG(smpe),
MLXSW_REG(sftr2),
MLXSW_REG(smid2),
MLXSW_REG(cwtp),
@@ -13084,16 +12997,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
- MLXSW_REG(rxlte),
- MLXSW_REG(rxltm),
- MLXSW_REG(rlcmld),
- MLXSW_REG(rlpmce),
- MLXSW_REG(xltq),
- MLXSW_REG(xmdr),
- MLXSW_REG(xrmt),
- MLXSW_REG(xralta),
- MLXSW_REG(xralst),
- MLXSW_REG(xraltb),
+ MLXSW_REG(reiv),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
@@ -13124,9 +13028,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mtpcpc),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
MLXSW_REG(mbct),
+ MLXSW_REG(mddt),
MLXSW_REG(mddq),
MLXSW_REG(mddc),
MLXSW_REG(mfde),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index daacf6291253..19ae0d1c74a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -11,6 +11,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_PGT_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
@@ -23,6 +24,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
+ MLXSW_RES_ID_FID,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
@@ -69,6 +71,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_PGT_SIZE] = 0x1004,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
@@ -81,6 +84,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
+ [MLXSW_RES_ID_FID] = 0x2512,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index cafd206e8d7e..1e240cdd9cbd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <linux/ptp_classify.h>
#include "spectrum.h"
#include "pci.h"
@@ -166,7 +167,7 @@ MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
* set, otherwise calculated based on the packet's VID using VID to FID mapping.
* Valid for data packets only.
*/
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
/* tx_hdr_type
* 0 - Data packets
@@ -230,8 +231,8 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
@@ -246,6 +247,82 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr;
+ u16 max_fid;
+ int err;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ err = -ENOMEM;
+ goto err_skb_cow_head;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+ err = -EIO;
+ goto err_res_valid;
+ }
+ max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ return 0;
+
+err_res_valid:
+err_skb_cow_head:
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+ unsigned int type;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ type = ptp_classify_raw(skb);
+ return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+ * need special handling and cannot be transmitted as regular control
+ * packets.
+ */
+ if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+ return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+ mlxsw_sp_port, skb,
+ tx_info);
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
switch (state) {
@@ -648,12 +725,6 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
@@ -664,7 +735,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- mlxsw_sp_txhdr_construct(skb, &tx_info);
+ err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+ &tx_info);
+ if (err)
+ return NETDEV_TX_OK;
+
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
@@ -1999,7 +2074,6 @@ __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
for (i = 1; i < max_ports; i++)
@@ -2007,12 +2081,10 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
/* Make sure all scheduled events are processed */
__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
- devl_lock(devlink);
for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
- devl_unlock(devlink);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
}
@@ -2034,7 +2106,6 @@ mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
@@ -2057,7 +2128,6 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
goto err_event_enable;
}
- devl_lock(devlink);
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
@@ -2070,7 +2140,6 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_port_create;
}
- devl_unlock(devlink);
return 0;
err_port_create:
@@ -2080,7 +2149,6 @@ err_port_create:
i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
- devl_unlock(devlink);
err_event_enable:
for (i--; i >= 1; i--)
mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
@@ -2105,9 +2173,6 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
for (i = 1; i < max_ports; i++) {
- if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
- continue;
-
port_mapping = &mlxsw_sp->port_mapping[i];
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
@@ -2676,6 +2741,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2692,6 +2758,24 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3013,6 +3097,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_pgt_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
+ goto err_pgt_init;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3100,7 +3190,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_router_init;
}
- if (mlxsw_sp->bus_info->read_frc_capable) {
+ if (mlxsw_sp->bus_info->read_clock_capable) {
/* NULL is a valid return value from clock_init */
mlxsw_sp->clock =
mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
@@ -3204,6 +3294,8 @@ err_traps_init:
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
return err;
@@ -3235,7 +3327,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
+ mlxsw_sp->pgt_smpe_index_valid = true;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3267,7 +3361,9 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3299,7 +3395,9 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3323,7 +3421,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
- mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
@@ -3331,7 +3429,9 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3364,28 +3464,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
}
-/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
- * 802.1Q FIDs
- */
-#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
- VLAN_VID_MASK - 1)
-
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
@@ -3399,25 +3491,22 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvh_xlt_cache_mode = 1,
- .kvh_xlt_cache_mode = 1,
+ .used_ubridge = 1,
+ .ubridge = 1,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
static void
@@ -3477,19 +3566,19 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&hash_single_size_params);
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
if (err)
return err;
linear_size = profile->kvd_linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
- linear_size,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_RESOURCE_KVD,
- &linear_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &linear_size_params);
if (err)
return err;
@@ -3502,20 +3591,20 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
- double_size,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_double_size_params);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
- single_size,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_single_size_params);
if (err)
return err;
@@ -3536,10 +3625,10 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVD_GRANULARITY,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
}
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
@@ -3555,10 +3644,10 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&span_size_params, max_span, max_span,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
- max_span, MLXSW_SP_RESOURCE_SPAN,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &span_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
}
static int
@@ -3577,12 +3666,31 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
max_rif_mac_profiles, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- "rif_mac_profiles",
- max_rif_mac_profiles,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ return devl_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
+static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max_rifs;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
+ devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "rifs", max_rifs,
+ MLXSW_SP_RESOURCE_RIFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
}
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
@@ -3609,13 +3717,18 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3643,13 +3756,18 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3673,15 +3791,15 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
* granularity from the profile. In case the user
* provided the sizes they are obtained via devlink.
*/
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- p_linear_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
if (err)
*p_linear_size = profile->kvd_linear_size;
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- p_double_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
if (err) {
double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_linear_size;
@@ -3692,9 +3810,9 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
MLXSW_SP_KVD_GRANULARITY);
}
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- p_single_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
if (err)
*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_double_size - *p_linear_size;
@@ -3807,6 +3925,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
+ .sdq_supports_cqe_v2 = false,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3845,6 +3964,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3883,6 +4003,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3919,6 +4040,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a60d2bbd3aa6..c8ff2a6d7e90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -68,6 +68,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ MLXSW_SP_RESOURCE_RIFS,
};
struct mlxsw_sp_port;
@@ -111,15 +112,6 @@ enum mlxsw_sp_nve_type {
MLXSW_SP_NVE_TYPE_VXLAN,
};
-struct mlxsw_sp_mid {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 fid;
- u16 mid;
- bool in_hw;
- unsigned long *ports_in_mid; /* bits array */
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -142,6 +134,7 @@ struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
+struct mlxsw_sp_pgt;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -210,10 +203,13 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
+ const struct mlxsw_sp_fid_family **fid_family_arr;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
+ struct mlxsw_sp_pgt *pgt;
+ bool pgt_smpe_index_valid;
};
struct mlxsw_sp_ptp_ops {
@@ -247,6 +243,10 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+ int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
};
static inline struct mlxsw_sp_upper *
@@ -389,6 +389,31 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+struct mlxsw_sp_ports_bitmap {
+ unsigned long *bitmap;
+ unsigned int nbits;
+};
+
+static inline int
+mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ ports_bm->nbits = nbits;
+ ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!ports_bm->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ bitmap_free(ports_bm->bitmap);
+}
+
static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
bool *trap_en)
{
@@ -679,6 +704,12 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -715,6 +746,7 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1236,7 +1268,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
@@ -1264,7 +1295,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1286,6 +1318,9 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
+extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
@@ -1443,4 +1478,16 @@ int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+/* spectrum_pgt.c */
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member);
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index d20e794e01ca..1e3fc989393c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -216,8 +216,8 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
u64 resource_size;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
+ err = devl_resource_size_get(devlink, info->resource_id,
+ &resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
@@ -338,22 +338,22 @@ static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
if (err)
return err;
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp1_kvdl_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp1_kvdl_single_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp1_kvdl_chunks_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp1_kvdl_large_chunks_occ_get,
- kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
return 0;
}
@@ -362,14 +362,14 @@ static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl *kvdl = priv;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp1_kvdl_parts_fini(kvdl);
}
@@ -396,32 +396,32 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP1_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP1_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 10ae1115de6c..24ff305a2995 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -15,7 +15,7 @@ struct mlxsw_sp2_kvdl_part_info {
* usage bits we need and how many indexes there are
* represented by a single bit. This could be got from FW
* querying appropriate resources. So have the resource
- * ids for for this purpose in partition definition.
+ * ids for this purpose in partition definition.
*/
enum mlxsw_res_id usage_bit_count_res_id;
enum mlxsw_res_id index_range_res_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c68fc8f7ca99..c9f1c79f3f9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1290,12 +1290,12 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_mms_init;
mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
- err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
- mlxsw_sp->sb->sb_size,
- ing_pool_count,
- eg_pool_count,
- MLXSW_SP_SB_ING_TC_COUNT,
- MLXSW_SP_SB_EG_TC_COUNT);
+ err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ mlxsw_sp->sb->sb_size,
+ ing_pool_count,
+ eg_pool_count,
+ MLXSW_SP_SB_ING_TC_COUNT,
+ MLXSW_SP_SB_EG_TC_COUNT);
if (err)
goto err_devlink_sb_register;
@@ -1314,7 +1314,7 @@ err_sb_ports_init:
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
- devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
kfree(mlxsw_sp->sb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index fc2257753b9b..ee59c79156e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -67,16 +67,16 @@ static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
res_id);
- err = devlink_resource_size_get(devlink,
- sub_pool->resource_id,
- &sub_pool->size);
+ err = devl_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
if (err)
goto err_resource_size_get;
- devlink_resource_occ_get_register(devlink,
- sub_pool->resource_id,
- mlxsw_sp_counter_sub_pool_occ_get,
- sub_pool);
+ devl_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
sub_pool->base_index = base_index;
base_index += sub_pool->size;
@@ -88,8 +88,8 @@ err_resource_size_get:
for (i--; i >= 0; i--) {
sub_pool = &pool->sub_pools[i];
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
return err;
}
@@ -105,8 +105,8 @@ static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
sub_pool = &pool->sub_pools[i];
WARN_ON(atomic_read(&sub_pool->active_entries_count));
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
}
@@ -135,12 +135,12 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
spin_lock_init(&pool->counter_pool_lock);
atomic_set(&pool->active_entries_count, 0);
- err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- &pool->pool_size);
+ err = devl_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
if (err)
goto err_pool_resource_size_get;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- mlxsw_sp_counter_pool_occ_get, pool);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
if (!pool->usage) {
@@ -157,8 +157,8 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
err_sub_pools_init:
bitmap_free(pool->usage);
err_usage_alloc:
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
kfree(pool);
return err;
@@ -174,8 +174,8 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
pool->pool_size);
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
}
@@ -262,12 +262,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, pool_size,
pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- MLXSW_SP_RESOURCE_NAME_COUNTERS,
- pool_size,
- MLXSW_SP_RESOURCE_COUNTERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
@@ -287,12 +287,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, sub_pool_size,
sub_pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- sub_pool->resource_name,
- sub_pool_size,
- sub_pool->resource_id,
- MLXSW_SP_RESOURCE_COUNTERS,
- &size_params);
+ err = devl_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
if (err)
return err;
total_bank_config += sub_pool->bank_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5d494fabf93d..5416093c0e35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -295,17 +295,17 @@ static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
- &mlxsw_sp_erif_ops,
- mlxsw_sp, false);
+ return devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, false);
}
static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+ devl_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
}
static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type)
@@ -749,25 +749,25 @@ static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
return err;
}
@@ -775,8 +775,8 @@ static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
}
static int
@@ -826,25 +826,25 @@ static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
return err;
}
@@ -852,8 +852,8 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
}
static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
@@ -1231,25 +1231,25 @@ static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
return err;
}
@@ -1257,8 +1257,8 @@ static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
}
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
@@ -1266,10 +1266,8 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_headers_register(devlink,
- &mlxsw_sp_dpipe_headers);
- if (err)
- return err;
+ devl_dpipe_headers_register(devlink, &mlxsw_sp_dpipe_headers);
+
err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
if (err)
goto err_erif_table_init;
@@ -1294,7 +1292,7 @@ err_host6_table_init:
err_host4_table_init:
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
err_erif_table_init:
- devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ devl_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
return err;
}
@@ -1306,5 +1304,5 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
- devlink_dpipe_headers_unregister(devlink);
+ devl_dpipe_headers_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index ce80931f0402..045a24cacfa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -22,11 +22,18 @@ struct mlxsw_sp_fid_core {
unsigned int *port_fid_mappings;
};
+struct mlxsw_sp_fid_port_vid {
+ struct list_head list;
+ u16 local_port;
+ u16 vid;
+};
+
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
refcount_t ref_count;
u16 fid_index;
+ u16 fid_offset;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -37,6 +44,7 @@ struct mlxsw_sp_fid {
int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
+ struct list_head port_vid_list; /* Ordered by local port. */
};
struct mlxsw_sp_fid_8021q {
@@ -63,7 +71,6 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_reg_sfgc_bridge_type bridge_type;
enum mlxsw_flood_table_type table_type;
int table_index;
};
@@ -76,18 +83,18 @@ struct mlxsw_sp_fid_ops {
u16 *p_fid_index);
bool (*compare)(const struct mlxsw_sp_fid *fid,
const void *arg);
- u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
int (*port_vid_map)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
- int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ int (*vni_set)(struct mlxsw_sp_fid *fid);
void (*vni_clear)(struct mlxsw_sp_fid *fid);
- int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
+ int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif);
};
struct mlxsw_sp_fid_family {
@@ -102,7 +109,10 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- u8 lag_vid_valid:1;
+ bool flood_rsp;
+ enum mlxsw_reg_bridge_type bridge_type;
+ u16 pgt_base;
+ bool smpe_index_valid;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -137,11 +147,6 @@ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
return fid_family->start_index == fid_index;
}
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->lag_vid_valid;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
@@ -206,17 +211,20 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
int err;
- if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ if (WARN_ON(fid->nve_flood_index_valid))
return -EINVAL;
- err = ops->nve_flood_index_set(fid, nve_flood_index);
- if (err)
- return err;
-
fid->nve_flood_index = nve_flood_index;
fid->nve_flood_index_valid = true;
+ err = ops->nve_flood_index_set(fid);
+ if (err)
+ goto err_nve_flood_index_set;
return 0;
+
+err_nve_flood_index_set:
+ fid->nve_flood_index_valid = false;
+ return err;
}
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
@@ -224,7 +232,7 @@ void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
- if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ if (WARN_ON(!fid->nve_flood_index_valid))
return;
fid->nve_flood_index_valid = false;
@@ -244,7 +252,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
int err;
- if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ if (WARN_ON(fid->vni_valid))
return -EINVAL;
fid->nve_type = type;
@@ -256,15 +264,15 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
if (err)
return err;
- err = ops->vni_set(fid, vni);
+ fid->vni_valid = true;
+ err = ops->vni_set(fid);
if (err)
goto err_vni_set;
- fid->vni_valid = true;
-
return 0;
err_vni_set:
+ fid->vni_valid = false;
rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
mlxsw_sp_fid_vni_ht_params);
return err;
@@ -276,7 +284,7 @@ void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ if (WARN_ON(!fid->vni_valid))
return;
fid->vni_valid = false;
@@ -316,34 +324,43 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
return NULL;
}
+static u16
+mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
+{
+ return fid_family->end_index - fid_family->start_index + 1;
+}
+
+static u16
+mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 fid_offset)
+{
+ u16 num_fids;
+
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ return fid_family->pgt_base + num_fids * flood_table->table_index +
+ fid_offset;
+}
+
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr2_pl;
- int err;
+ u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ if (WARN_ON(!fid_family->flood_tables))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
- if (!sftr2_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
- sftr2_pl);
- kfree(sftr2_pl);
- return err;
+ mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
+ fid->fid_offset);
+ return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
+ fid->fid_index, local_port, member);
}
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
@@ -370,11 +387,6 @@ enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
return fid->fid_family->type;
}
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
-{
- fid->rif = rif;
-}
-
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
{
return fid->rif;
@@ -405,6 +417,7 @@ static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -413,38 +426,341 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 fid_offset, bool valid)
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
- fid_offset);
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
+ fid->fid_offset, fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- __be32 vni, bool vni_valid, u32 nve_flood_index,
- bool nve_flood_index_valid)
+static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ fid->fid_index, fid->fid_offset,
+ fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
+
+ if (rif) {
+ mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
+ mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
+ }
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
- 0);
- mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
- mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
- mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
- mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif,
+ bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
+ be32_to_cpu(fid->vni), irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_edit_op(fid, rif);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ if (!fid->vni_valid)
+ return 0;
+
+ return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
+}
+
+static int
+mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
+ irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int
+mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ /* Update the global VID => FID mapping we created when the FID was
+ * configured.
+ */
+ return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
+}
+
+static int
+mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_fid_port_vid *pv,
+ bool irif_valid, u16 irif_index)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
+ fid->fid_index, pv->vid, irif_valid,
+ irif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+ u16 irif_index;
+ int err;
+
+ err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ irif_index = mlxsw_sp_rif_index(rif);
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated with the
+ * ingress RIF.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
+ true,
+ irif_index);
+ if (err)
+ goto err_port_vid_to_fid_rif_update_one;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_rif_update_one:
+ list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
+ bool valid, u8 port_page)
+{
+ u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
+ u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *port_vid;
+ u8 rec_num, entries_num = 0;
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+
+ list_for_each_entry(port_vid, &fid->port_vid_list, list) {
+ /* port_vid_list is sorted by local_port. */
+ if (port_vid->local_port < local_port_start)
+ continue;
+
+ if (port_vid->local_port > local_port_end)
+ break;
+
+ rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
+ valid ? port_vid->vid : 0);
+ entries_num++;
+ }
+
+ if (!entries_num) {
+ kfree(reiv_pl);
+ return 0;
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ if (err)
+ goto err_reg_write;
+
+ kfree(reiv_pl);
+ return 0;
+
+err_reg_write:
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
+ u16 rif_index, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u8 num_port_pages;
+ int err, i;
+
+ num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
+ MLXSW_REG_REIV_REC_MAX_COUNT + 1;
+
+ for (i = 0; i < num_port_pages; i++) {
+ err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
+ if (err)
+ goto err_reiv_handle;
+ }
+
+ return 0;
+
+err_reiv_handle:
+ for (; i >= 0; i--)
+ mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
+ return err;
+}
+
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ u16 rif_index = mlxsw_sp_rif_index(rif);
+ int err;
+
+ err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
+ if (err)
+ goto err_vni_to_fid_rif_update;
+
+ err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
+ if (err)
+ goto err_vid_to_fid_rif_set;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
+ if (err)
+ goto err_erif_eport_to_vid_map;
+
+ fid->rif = rif;
+ return 0;
+
+err_erif_eport_to_vid_map:
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+err_vid_to_fid_rif_set:
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+err_vni_to_fid_rif_update:
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
+{
+ u16 rif_index;
+
+ if (!fid->rif)
+ return;
+
+ rif_index = mlxsw_sp_rif_index(fid->rif);
+ fid->rif = NULL;
+
+ mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_edit_op(fid, fid->rif);
+ if (err)
+ goto err_fid_edit_op;
+
+ return 0;
+
+err_fid_edit_op:
+ mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
+ return err;
+}
+
+static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid = false;
+ u16 irif_index = 0;
+
+ if (fid->rif) {
+ irif_valid = true;
+ irif_index = mlxsw_sp_rif_index(fid->rif);
+ }
- mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
+ vid, irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
@@ -459,20 +775,19 @@ static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
if (fid->vni_valid)
mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
@@ -498,14 +813,8 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
}
-static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index - VLAN_N_VID;
-}
-
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
int err;
@@ -517,7 +826,7 @@ static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ err = __mlxsw_sp_fid_port_vid_map(fid,
mlxsw_sp_port->local_port,
vid, true);
if (err)
@@ -540,8 +849,7 @@ err_fid_port_vid_map:
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
return err;
@@ -549,7 +857,6 @@ err_fid_port_vid_map:
static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -562,12 +869,108 @@ static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
}
+static int
+mlxsw_sp_fid_port_vid_list_add(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp_port_vid;
+
+ port_vid = kzalloc(sizeof(*port_vid), GFP_KERNEL);
+ if (!port_vid)
+ return -ENOMEM;
+
+ port_vid->local_port = local_port;
+ port_vid->vid = vid;
+
+ list_for_each_entry(tmp_port_vid, &fid->port_vid_list, list) {
+ if (tmp_port_vid->local_port > local_port)
+ break;
+ }
+
+ list_add_tail(&port_vid->list, &tmp_port_vid->list);
+ return 0;
+}
+
+static void
+mlxsw_sp_fid_port_vid_list_del(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
+
+ list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
+ if (port_vid->local_port != local_port || port_vid->vid != vid)
+ continue;
+
+ list_del(&port_vid->list);
+ kfree(port_vid);
+ return;
+ }
+}
+
+static int
+mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char smpe_pl[MLXSW_REG_SMPE_LEN];
+
+ mlxsw_reg_smpe_pack(smpe_pl, local_port, fid->fid_index,
+ valid ? vid : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
+}
+
+static int
+mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
+ u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = mlxsw_sp_rif_index(fid->rif);
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
+ if (err)
+ return err;
+
+ if (!fid->rif)
+ return 0;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ valid);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+
+ return 0;
+
+err_erif_eport_to_vid_map_one:
+ mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
+ return err;
+}
+
static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
@@ -576,11 +979,20 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, true);
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
if (err)
return err;
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -591,8 +1003,11 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
return err;
}
@@ -606,43 +1021,29 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
}
-static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
- true, fid->nve_flood_index,
- fid->nve_flood_index_valid);
+ return mlxsw_sp_fid_vni_op(fid);
}
static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
- fid->nve_flood_index, fid->nve_flood_index_valid);
+ mlxsw_sp_fid_vni_op(fid);
}
-static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index)
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
- fid->vni, fid->vni_valid, nve_flood_index,
- true);
+ return mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
- fid->vni_valid, 0, false);
+ mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void
@@ -652,13 +1053,19 @@ mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, 0);
}
+static int
+mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021d_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
@@ -666,42 +1073,32 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
};
+#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
+#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
+#define MLXSW_SP_FID_8021Q_PGT_BASE 0
+#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
+
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
.packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 0,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 1,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 2,
},
};
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
- .type = MLXSW_SP_FID_TYPE_8021D,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
- .start_index = VLAN_N_VID,
- .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
- .lag_vid_valid = 1,
-};
-
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -717,48 +1114,19 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021d_configure,
- .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
- .vni_set = mlxsw_sp_fid_8021d_vni_set,
- .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
- .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
- .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
- .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
-};
-
-/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
-#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
-#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
- VLAN_VID_MASK - 2)
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = MLXSW_SP_FID_8021Q_EMU_START,
- .end_index = MLXSW_SP_FID_8021Q_EMU_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_emu_ops,
- .lag_vid_valid = 1,
-};
+static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
{
- /* rFIDs are allocated by the device during init */
- return 0;
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
{
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
@@ -787,9 +1155,28 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- /* We only need to transition the port to virtual mode since
- * {Port, VID} => FID is done by the firmware upon RIF creation.
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ return err;
+
+ /* Using legacy bridge model, we only need to transition the port to
+ * virtual mode since {Port, VID} => FID is done by the firmware upon
+ * RIF creation. Using unified bridge model, we need to map
+ * {Port, VID} => FID and map egress VID.
*/
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ goto err_port_vid_map;
+
+ if (fid->rif) {
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
+ vid, true);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+ }
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -800,6 +1187,13 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+err_erif_eport_to_vid_map_one:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+err_port_vid_map:
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
return err;
}
@@ -813,39 +1207,69 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+}
+
+static int mlxsw_sp_fid_rfid_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_rfid_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int
+mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .setup = mlxsw_sp_fid_rfid_setup,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
.compare = mlxsw_sp_fid_rfid_compare,
.port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
};
-#define MLXSW_SP_RFID_BASE (15 * 1024)
-#define MLXSW_SP_RFID_MAX 1024
-
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
- .type = MLXSW_SP_FID_TYPE_RFID,
- .fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE,
- .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
- .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
-};
+static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
-
- return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
{
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
@@ -862,26 +1286,252 @@ static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
return true;
}
+static int mlxsw_sp_fid_dummy_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_dummy_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .setup = mlxsw_sp_fid_dummy_setup,
.configure = mlxsw_sp_fid_dummy_configure,
.deconfigure = mlxsw_sp_fid_dummy_deconfigure,
.index_alloc = mlxsw_sp_fid_dummy_index_alloc,
.compare = mlxsw_sp_fid_dummy_compare,
+ .vni_set = mlxsw_sp_fid_dummy_vni_set,
+ .vni_clear = mlxsw_sp_fid_dummy_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+};
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ int err;
+
+ err = mlxsw_sp_fid_op(fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
+ if (err)
+ goto err_vid_to_fid_map;
+
+ return 0;
+
+err_vid_to_fid_map:
+ mlxsw_sp_fid_op(fid, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+
+ mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured, otherwise, configure new mapping.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
+ err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ return 0;
+
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+};
+
+/* There are 4K-2 802.1Q FIDs */
+#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
+#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
+ MLXSW_SP_FID_8021Q_MAX - 1)
+
+/* There are 1K 802.1D FIDs */
+#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
+#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
+ MLXSW_SP_FID_8021D_MAX - 1)
+
+/* There is one dummy FID */
+#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
+
+/* There are 11K rFIDs */
+#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
+#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
+ MLXSW_SP_FID_RFID_MAX - 1)
+
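+/* With the sizes above (4K-2 + 1K + 1 + 11K), the resulting index layout is
+ * 1..4094 (802.1Q), 4095..5118 (802.1D), 5119 (dummy) and 5120..16383 (rFID).
+ */
+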
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+ .flood_rsp = true,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = VLAN_N_VID - 1,
- .end_index = VLAN_N_VID - 1,
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
- [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -919,6 +1569,8 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fid->port_vid_list);
fid->fid_family = fid_family;
err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
@@ -927,8 +1579,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- if (fid->fid_family->ops->setup)
- fid->fid_family->ops->setup(fid, arg);
+ fid->fid_family->ops->setup(fid, arg);
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -967,6 +1618,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
fid->fid_family->ops->deconfigure(fid);
__clear_bit(fid->fid_index - fid_family->start_index,
fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid->port_vid_list));
kfree(fid);
}
@@ -1010,26 +1662,49 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
const struct mlxsw_sp_flood_table *flood_table)
{
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
const int *sfgc_packet_types;
- int i;
+ u16 num_fids, mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
+ if (err)
+ return err;
sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
char sfgc_pl[MLXSW_REG_SFGC_LEN];
- int err;
if (!sfgc_packet_types[i])
continue;
- mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
- flood_table->table_type,
- flood_table->table_index);
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
- return err;
+ goto err_reg_write;
}
return 0;
+
+err_reg_write:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 num_fids, mid_base;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
}
static int
@@ -1050,6 +1725,19 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
return 0;
}
+static void
+mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_tables[i];
+ mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
+ }
+}
+
static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fid_family *tmpl)
{
@@ -1091,6 +1779,10 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid_family *fid_family)
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+
+ if (fid_family->flood_tables)
+ mlxsw_sp_fid_flood_tables_fini(fid_family);
+
bitmap_free(fid_family->fids_bitmap);
WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
kfree(fid_family);
@@ -1144,7 +1836,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp_fid_family_arr[i]);
+ mlxsw_sp->fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
new file mode 100644
index 000000000000..7dd3dba0fa83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/refcount.h>
+#include <linux/idr.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
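+/* The PGT (Port Group Table) maps a MID (multicast identifier) index to a set
+ * of local ports. Users such as the FID flood tables allocate MIDs from this
+ * table, and the code below programs the per-MID port membership via SMID2.
+ */
+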
+struct mlxsw_sp_pgt {
+ struct idr pgt_idr;
+ u16 end_index; /* Exclusive. */
+ struct mutex lock; /* Protects PGT. */
+ bool smpe_index_valid;
+};
+
+struct mlxsw_sp_pgt_entry {
+ struct list_head ports_list;
+ u16 index;
+ u16 smpe_index;
+};
+
+struct mlxsw_sp_pgt_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+};
+
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
+{
+ int index, err = 0;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
+ mlxsw_sp->pgt->end_index, GFP_KERNEL);
+
+ if (index < 0) {
+ err = index;
+ goto err_idr_alloc;
+ }
+
+ *p_mid = index;
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
+{
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int
+mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ unsigned int idr_cursor;
+ int i, err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ /* This function is supposed to be called several times as part of
+ * driver init, in a specific order. Verify that mid_base is the
+ * first free index in the IDR, so that the indexes can be freed in
+ * case of error.
+ */
+ idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
+ if (WARN_ON(idr_cursor != mid_base)) {
+ err = -EINVAL;
+ goto err_idr_cursor;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
+ mid_base, mid_base + count, GFP_KERNEL);
+ if (err < 0)
+ goto err_idr_alloc_cyclic;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc_cyclic:
+ for (i--; i >= 0; i--)
+ idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
+err_idr_cursor:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void
+mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
+ int i;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ for (i = 0; i < count; i++)
+ WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+
+ list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
+ if (pgt_entry_port->local_port == local_port)
+ return pgt_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ void *ret;
+ int err;
+
+ pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
+ if (!pgt_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto err_idr_replace;
+ }
+
+ INIT_LIST_HEAD(&pgt_entry->ports_list);
+ pgt_entry->index = mid;
+ pgt_entry->smpe_index = smpe;
+ return pgt_entry;
+
+err_idr_replace:
+ kfree(pgt_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
+ struct mlxsw_sp_pgt_entry *pgt_entry)
+{
+ WARN_ON(!list_empty(&pgt_entry->ports_list));
+
+ pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
+ if (WARN_ON(IS_ERR(pgt_entry)))
+ return;
+
+ kfree(pgt_entry);
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (pgt_entry)
+ return pgt_entry;
+
+ return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
+}
+
+static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (WARN_ON(!pgt_entry))
+ return;
+
+ if (list_empty(&pgt_entry->ports_list))
+ mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
+}
+
+static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
+ bool member)
+{
+ mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
+}
+
+static int
+mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port, bool member)
+{
+ char *smid2_pl;
+ int err;
+
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
+ mlxsw_sp->pgt->smpe_index_valid,
+ pgt_entry->smpe_index);
+
+ mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+
+ kfree(smid2_pl);
+
+ return err;
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ int err;
+
+ pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
+ if (!pgt_entry_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
+ true);
+ if (err)
+ goto err_pgt_entry_port_write;
+
+ pgt_entry_port->local_port = local_port;
+ list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
+
+ return pgt_entry_port;
+
+err_pgt_entry_port_write:
+ kfree(pgt_entry_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
+
+{
+ list_del(&pgt_entry_port->list);
+ mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
+ pgt_entry_port->local_port, false);
+ kfree(pgt_entry_port);
+}
+
+static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ int err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
+ if (IS_ERR(pgt_entry)) {
+ err = PTR_ERR(pgt_entry);
+ goto err_pgt_entry_get;
+ }
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
+ local_port);
+ if (IS_ERR(pgt_entry_port)) {
+ err = PTR_ERR(pgt_entry_port);
+ goto err_pgt_entry_port_get;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_pgt_entry_port_get:
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+err_pgt_entry_get:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
+ u16 mid, u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
+ if (!pgt_entry)
+ goto out;
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
+ if (!pgt_entry_port)
+ goto out;
+
+ mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+
+out:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member)
+{
+ if (member)
+ return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
+ local_port);
+
+ mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
+ return 0;
+}
+
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_pgt *pgt;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
+ return -EIO;
+
+ pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
+ if (!pgt)
+ return -ENOMEM;
+
+ idr_init(&pgt->pgt_idr);
+ pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
+ mutex_init(&pgt->lock);
+ pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
+ mlxsw_sp->pgt = pgt;
+ return 0;
+}
+
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->pgt->lock);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
+ idr_destroy(&mlxsw_sp->pgt->pgt_idr);
+ kfree(mlxsw_sp->pgt);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
index 39052e5c12fd..22ebb207ce4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
@@ -94,10 +94,10 @@ mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family)
atomic_set(&family->policers_count, 0);
devlink = priv_to_devlink(core);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- mlxsw_sp_policer_single_rate_occ_get,
- family);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ mlxsw_sp_policer_single_rate_occ_get,
+ family);
return 0;
}
@@ -107,8 +107,8 @@ mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family)
{
struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
WARN_ON(atomic_read(&family->policers_count) != 0);
}
@@ -419,22 +419,22 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, global_policers,
global_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "global_policers",
- global_policers,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink, "global_policers",
+ global_policers,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, single_rate_policers,
single_rate_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "single_rate_policers",
- single_rate_policers,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- &size_params);
+ err = devl_resource_register(devlink, "single_rate_policers",
+ single_rate_policers,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ &size_params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 35422e64d89f..2e0b704b8a31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -11,6 +11,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "spectrum_ptp.h"
@@ -29,12 +30,24 @@
struct mlxsw_sp_ptp_state {
struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp1_ptp_state {
+ struct mlxsw_sp_ptp_state common;
struct rhltable unmatched_ht;
spinlock_t unmatched_lock; /* protects the HT */
struct delayed_work ht_gc_dw;
u32 gc_cycle;
};
+struct mlxsw_sp2_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+ * enabled.
+ */
+ struct hwtstamp_config config;
+};
+
struct mlxsw_sp1_ptp_key {
u16 local_port;
u8 message_type;
@@ -60,20 +73,44 @@ static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
struct mlxsw_sp_ptp_clock {
struct mlxsw_core *core;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+};
+
+struct mlxsw_sp1_ptp_clock {
+ struct mlxsw_sp_ptp_clock common;
spinlock_t lock; /* protect this structure */
struct cyclecounter cycles;
struct timecounter tc;
u32 nominal_c_mult;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
unsigned long overflow_period;
struct delayed_work overflow_work;
};
-static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
+static struct mlxsw_sp1_ptp_state *
+mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp1_ptp_clock *
+mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
+}
+
+static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u32 frc_h1, frc_h2, frc_l;
frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
@@ -94,20 +131,20 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(cc, struct mlxsw_sp_ptp_clock, cycles);
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}
static int
-mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
+mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
struct mlxsw_core *mlxsw_core = clock->core;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
- freq_adj, 0);
+ freq_adj, 0, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
@@ -122,9 +159,9 @@ static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
}
static int
-mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u64 next_sec, next_sec_in_nsec, cycles;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
char mtpps_pl[MLXSW_REG_MTPPS_LEN];
@@ -144,14 +181,13 @@ mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
mlxsw_reg_mtutc_pack(mtutc_pl,
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
- 0, next_sec);
+ 0, next_sec, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
int neg_adj = 0;
u32 diff;
u64 adj;
@@ -174,13 +210,12 @@ static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
clock->nominal_c_mult + diff;
spin_unlock_bh(&clock->lock);
- return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
+ return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
}
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec;
spin_lock_bh(&clock->lock);
@@ -195,8 +230,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 cycles, nsec;
spin_lock_bh(&clock->lock);
@@ -212,8 +246,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec = timespec64_to_ns(ts);
spin_lock_bh(&clock->lock);
@@ -237,9 +270,9 @@ static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
- clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);
+ clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
spin_lock_bh(&clock->lock);
timecounter_read(&clock->tc);
@@ -251,7 +284,7 @@ struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
u64 overflow_cycles, nsec, frac = 0;
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
int err;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
@@ -265,10 +298,9 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
- clock->core = mlxsw_sp->core;
+ clock->common.core = mlxsw_sp->core;
- timecounter_init(&clock->tc, &clock->cycles,
- ktime_to_ns(ktime_get_real()));
+ timecounter_init(&clock->tc, &clock->cycles, 0);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -286,7 +318,158 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
mlxsw_core_schedule_dw(&clock->overflow_work, 0);
- clock->ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
+ if (IS_ERR(clock->common.ptp)) {
+ err = PTR_ERR(clock->common.ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return &clock->common;
+
+err_ptp_clock_register:
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
+{
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
+ ptp_clock_unregister(clock_common->ptp);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+}
+
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 utc_sec1, utc_sec2, utc_nsec;
+
+ utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (utc_sec1 != utc_sec2) {
+ /* Wrap around. */
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ u32 sec, nsec_rem;
+
+ sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+ 0, sec, nsec_rem, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ /* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+ * reversed; positive values decrease the frequency. Adjust the sign of
+ * PPB to match this behavior.
+ */
+ return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ /* HW time adjustment range is s16. If out of range, set time instead. */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+ nsec += delta;
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+ }
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+ 0, 0, 0, delta);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+ .adjfine = mlxsw_sp2_ptp_adjfine,
+ .adjtime = mlxsw_sp2_ptp_adjtime,
+ .gettimex64 = mlxsw_sp2_ptp_gettimex,
+ .settime64 = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->core = mlxsw_sp->core;
+
+ clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+ err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+ if (err) {
+ dev_err(dev, "setting UTC time failed %d\n", err);
+ goto err_ptp_phc_settime;
+ }
+
clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
if (IS_ERR(clock->ptp)) {
err = PTR_ERR(clock->ptp);
@@ -297,15 +480,14 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
return clock;
err_ptp_clock_register:
- cancel_delayed_work_sync(&clock->overflow_work);
+err_ptp_phc_settime:
kfree(clock);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
ptp_clock_unregister(clock->ptp);
- cancel_delayed_work_sync(&clock->overflow_work);
kfree(clock);
}
@@ -348,7 +530,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
u64 timestamp)
{
int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
- struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int err;
@@ -359,7 +541,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
unmatched->key = key;
unmatched->skb = skb;
unmatched->timestamp = timestamp;
- unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
+ unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
@@ -373,11 +555,12 @@ static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key, int *p_length)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
struct rhlist_head *tmp, *list;
int length = 0;
- list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
mlxsw_sp1_ptp_unmatched_ht_params);
rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
last = unmatched;
@@ -392,7 +575,9 @@ static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
- return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+
+ return rhltable_remove(&ptp_state->unmatched_ht,
&unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
}
@@ -438,12 +623,16 @@ static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb,
u64 timestamp)
{
+ struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
struct skb_shared_hwtstamps hwtstamps;
u64 nsec;
- spin_lock_bh(&mlxsw_sp->clock->lock);
- nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp);
- spin_unlock_bh(&mlxsw_sp->clock->lock);
+ spin_lock_bh(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ spin_unlock_bh(&clock->lock);
hwtstamps.hwtstamp = ns_to_ktime(nsec);
mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
@@ -481,13 +670,14 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb, u64 timestamp)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int length;
int err;
rcu_read_lock();
- spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_lock(&ptp_state->unmatched_lock);
unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
if (skb && unmatched && unmatched->timestamp) {
@@ -515,7 +705,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(err);
}
- spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_unlock(&ptp_state->unmatched_lock);
if (unmatched)
mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
@@ -611,9 +801,10 @@ void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
+mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
+ struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
struct mlxsw_sp_ptp_port_dir_stats *stats;
struct mlxsw_sp_port *mlxsw_sp_port;
int err;
@@ -636,7 +827,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
/* The packet was matched with timestamp during the walk. */
goto out;
- mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
+ mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
if (mlxsw_sp_port) {
stats = unmatched->key.ingress ?
&mlxsw_sp_port->ptp.stats.rx_gcd :
@@ -653,7 +844,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
* netif_receive_skb(), in process context, is seen elsewhere in the
* kernel, notably in pktgen.
*/
- mlxsw_sp1_ptp_unmatched_finish(ptp_state->mlxsw_sp, unmatched);
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
out:
local_bh_enable();
@@ -663,12 +854,12 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp1_ptp_unmatched *unmatched;
- struct mlxsw_sp_ptp_state *ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state;
struct rhashtable_iter iter;
u32 gc_cycle;
void *obj;
- ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
+ ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
gc_cycle = ptp_state->gc_cycle++;
rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
@@ -694,7 +885,7 @@ static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
{
char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
- mlxsw_reg_mtptptp_pack(mtptpt_pl, trap_id, message_type);
+ mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
}
@@ -807,10 +998,44 @@ static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 event_message_type;
+ int err;
+
+ /* Deliver these message types as PTP0. */
+ event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ event_message_type);
+ if (err)
+ return err;
+
+ /* Everything else is PTP1. */
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ ~event_message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ return 0;
+
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
+
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_ptp_state *ptp_state;
- u16 message_type;
+ struct mlxsw_sp1_ptp_state *ptp_state;
int err;
err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
@@ -820,7 +1045,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
- ptp_state->mlxsw_sp = mlxsw_sp;
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
spin_lock_init(&ptp_state->unmatched_lock);
@@ -829,22 +1054,9 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_hashtable_init;
- /* Delive these message types as PTP0. */
- message_type = BIT(PTP_MSGTYPE_SYNC) |
- BIT(PTP_MSGTYPE_DELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_RESP);
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
- message_type);
- if (err)
- goto err_mtptpt_set;
-
- /* Everything else is PTP1. */
- message_type = ~message_type;
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
- message_type);
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
if (err)
- goto err_mtptpt1_set;
+ goto err_ptp_traps_set;
err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
if (err)
@@ -853,28 +1065,28 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
MLXSW_SP1_PTP_HT_GC_INTERVAL);
- return ptp_state;
+ return &ptp_state->common;
err_fifo_clr:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-err_mtptpt1_set:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-err_mtptpt_set:
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
{
- struct mlxsw_sp *mlxsw_sp = ptp_state->mlxsw_sp;
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp1_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
rhltable_free_and_destroy(&ptp_state->unmatched_ht,
&mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
@@ -887,9 +1099,10 @@ int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
- u16 *p_ing_types, u16 *p_egr_types,
- enum hwtstamp_rx_filters *p_rx_filter)
+static int
+mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
{
enum hwtstamp_rx_filters rx_filter = config->rx_filter;
enum hwtstamp_tx_types tx_type = config->tx_type;
@@ -1050,8 +1263,8 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 egr_types;
int err;
- err = mlxsw_sp_ptp_get_message_types(config, &ing_types, &egr_types,
- &rx_filter);
+ err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
+ &rx_filter);
if (err)
return err;
@@ -1144,3 +1357,354 @@ void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
*data++ = *(u64 *)(stats + offset);
}
}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ return &ptp_state->common;
+
+err_ptp_traps_set:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+ u8 cqe_ts_sec)
+{
+ u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (cqe_ts_sec > (utc_sec & 0xff))
+ /* A time stamp greater than the low byte of UTC (UTC & 0xff) means the
+ * latter has wrapped after the time stamp was collected.
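+ * E.g. a UTC read of 0x1234 seconds with a CQE value of 0xfe yields
+ * 0x11fe, since the CQE was sampled just before the low byte wrapped.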
+ */
+ utc_sec -= 256;
+
+ utc_sec &= ~0xff;
+ utc_sec |= cqe_ts_sec;
+
+ return utc_sec;
+}
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_skb_cb *cb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ u64 ts_sec, ts_nsec, nsec;
+
+ WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+ /* The time stamp in the CQE is represented by 38 bits, which is a short
+ * representation of UTC time. Software should create the full time
+ * stamp using the global UTC clock. The seconds have only 8 bits in the
+ * CQE; to create the full time stamp, use the current UTC time and fix
+ * the seconds according to the relation between UTC seconds and CQE
+ * seconds.
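+ * The remaining 30 bits of the CQE carry the nanoseconds and are used
+ * as-is.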
+ */
+ ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+ ts_nsec = cb->cqe_ts.nsec;
+
+ nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ *skb_hwtstamps(skb) = hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ skb_tstamp_tx(skb, &hwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ *config = ptp_state->config;
+ return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ *p_rx_filter = rx_filter;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* In Spectrum-2 and above, all packets get a time stamp by
+ * default and the driver fills in the time stamp only for event
+ * packets. Return all event types even if only specific types
+ * were requested.
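+ * The value 0x0f covers the four PTP event messages: Sync, Delay_Req,
+ * Pdelay_Req and Pdelay_Resp.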
+ */
+ ing_types = 0x0f;
+ *p_rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0x0f;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+ mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+ egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+ u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
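+/* MTPCPC is programmed globally here (the per-port flag is not set), so only
+ * the first port that enables time stamping writes the register and only the
+ * last port that disables it clears the configuration; ptp_port_enabled_ref
+ * counts the ports with time stamping enabled.
+ */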
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ASSERT_RTNL();
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+ egr_types, new_config);
+ if (err)
+ return err;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ASSERT_RTNL();
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+ if (err)
+ goto err_ptp_disable;
+
+ return 0;
+
+err_ptp_disable:
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+ return err;
+}
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ enum hwtstamp_rx_filters rx_filter;
+ struct hwtstamp_config new_config;
+ u16 new_ing_types, new_egr_types;
+ bool ptp_enabled;
+ int err;
+
+ err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+ &new_egr_types, &rx_filter);
+ if (err)
+ return err;
+
+ new_config.flags = config->flags;
+ new_config.tx_type = config->tx_type;
+ new_config.rx_filter = rx_filter;
+
+ ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+ mlxsw_sp_port->ptp.egr_types;
+
+ if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+ err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+ new_egr_types, new_config);
+ if (err)
+ return err;
+ } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+ err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->ptp.ing_types = new_ing_types;
+ mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+
+ return 0;
+}
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+ * their correction field correctly set on the egress port, they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return -ENOMEM;
+ }
+ }
+
+ return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+ tx_info);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c06cd1384bca..2d1628fdefc1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -57,6 +57,40 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -136,7 +170,14 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index)
{
}
-#endif
+
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
@@ -184,16 +225,25 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
-{
-}
-
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
static inline int mlxsw_sp2_get_stats_count(void)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index ce33dbde124d..2c4443c6b964 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -443,65 +443,12 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
-static struct mlxsw_sp_fib_entry_priv *
-mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- struct mlxsw_sp_fib_entry_priv *priv;
-
- if (!ll_ops->fib_entry_priv_size)
- /* No need to have priv */
- return NULL;
-
- priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- refcount_set(&priv->refcnt, 1);
- return priv;
-}
-
-static void
-mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
-{
- kfree(priv);
-}
-
-static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
-{
- refcount_inc(&priv->refcnt);
-}
-
-static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv || !refcount_dec_and_test(&priv->refcnt))
- return;
- mlxsw_sp_fib_entry_priv_destroy(priv);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv)
- return;
- mlxsw_sp_fib_entry_priv_hold(priv);
- list_add(&priv->list, &op_ctx->fib_entry_priv_list);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_priv *priv, *tmp;
-
- list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
- mlxsw_sp_fib_entry_priv_put(priv);
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
- struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
@@ -537,7 +484,6 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
- const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -551,45 +497,16 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
-static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- return 0;
-}
-
-static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
- xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
- xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
-}
-
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
- err = ll_ops->init(mlxsw_sp, vr->id, proto);
- if (err)
- return ERR_PTR(err);
-
lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
@@ -601,7 +518,6 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
- fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -640,36 +556,33 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralst_pl[MLXSW_REG_XRALST_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -677,20 +590,19 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -701,11 +613,12 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -716,15 +629,14 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -732,7 +644,6 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -746,7 +657,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -757,11 +668,8 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops =
- mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
-
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -851,23 +759,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -5780,14 +5688,13 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -5795,140 +5702,39 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
-struct mlxsw_sp_fib_entry_op_ctx_basic {
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-};
-
static void
-mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
+mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
+ const struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
- enum mlxsw_reg_ralxx_protocol ralxx_proto;
- char *ralue_pl = op_ctx_basic->ralue_pl;
- enum mlxsw_reg_ralue_op ralue_op;
-
- ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+ enum mlxsw_reg_ralxx_protocol proto;
+ u32 *p_dip;
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
+ proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
- switch (proto) {
+ switch (fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, (u32 *) addr);
+ p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ *p_dip);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr);
break;
}
}
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
-}
-
-static int
-mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- op_ctx_basic->ralue_pl);
-}
-
-static bool
-mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- return true;
-}
-
-static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
-{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
-
- mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
- fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr,
- fib_entry->priv);
-}
-
-static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- bool postponed_for_bulk = false;
- int err;
-
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
- if (!postponed_for_bulk)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -5951,20 +5757,19 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -5976,64 +5781,61 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
int err;
if (WARN_ON(!ipip_entry))
@@ -6045,55 +5847,54 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
+ fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
if (err)
return err;
@@ -6103,35 +5904,18 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- bool is_new)
-{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
- MLXSW_SP_FIB_ENTRY_OP_UPDATE);
-}
-
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
-
- if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
- return 0;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- MLXSW_SP_FIB_ENTRY_OP_DELETE);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
@@ -6226,12 +6010,6 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
@@ -6260,8 +6038,6 @@ err_fib4_entry_type_set:
err_nexthop_group_vr_link:
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -6276,7 +6052,6 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -6514,16 +6289,14 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_update;
@@ -6534,25 +6307,14 @@ err_fib_entry_update:
return err;
}
-static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- int err;
- err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
fib_node->fib_entry = NULL;
- return err;
-}
-
-static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -6574,7 +6336,6 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -6609,7 +6370,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -6634,23 +6395,20 @@ err_fib4_entry_create:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
- int err;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return 0;
+ return;
fib_node = fib4_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -6958,9 +6716,9 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
@@ -6983,8 +6741,7 @@ static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
- &fib6_entry->common, false);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_entry_update;
@@ -7008,7 +6765,6 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -7026,7 +6782,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
if (err)
goto err_rt6_unwind;
@@ -7045,7 +6801,6 @@ err_rt6_unwind:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -7063,7 +6818,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}
static int
@@ -7149,12 +6904,6 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
@@ -7196,8 +6945,6 @@ err_rt6_unwind:
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -7220,7 +6967,6 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
- mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -7278,8 +7024,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -7318,7 +7064,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -7342,8 +7088,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -7371,7 +7117,8 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -7382,17 +7129,16 @@ err_fib6_entry_nexthop_add:
return err;
}
-static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
- int err;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return 0;
+ return;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -7401,22 +7147,22 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return 0;
+ return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
- return 0;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ return;
}
fib_node = fib6_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static struct mlxsw_sp_mr_table *
@@ -7569,15 +7315,15 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
}
}
-struct mlxsw_sp_fib6_event {
+struct mlxsw_sp_fib6_event_work {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event {
- struct list_head list; /* node in fib queue */
+struct mlxsw_sp_fib_event_work {
+ struct work_struct work;
union {
- struct mlxsw_sp_fib6_event fib6_event;
+ struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -7586,12 +7332,11 @@ struct mlxsw_sp_fib_event {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
- int family;
};
static int
-mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -7605,8 +7350,8 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
if (!rt_arr)
return -ENOMEM;
- fib6_event->rt_arr = rt_arr;
- fib6_event->nrt6 = nrt6;
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -7628,242 +7373,182 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
}
static void
-mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
int i;
- for (i = 0; i < fib6_event->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
- kfree(fib6_event->rt_arr);
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
- &fib_event->fen_info);
+ &fib_work->fen_info);
}
- fib_info_put(fib_event->fen_info.fi);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- fib_info_put(fib_event->fen_info.fi);
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
- fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
- struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
- mr_cache_put(fib_event->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
- mr_cache_put(fib_event->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_event->ven_info);
+ &fib_work->ven_info);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
- dev_put(fib_event->ven_info.dev);
+ dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
- dev_put(fib_event->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
- struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
- struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
- struct mlxsw_sp_fib_event *next_fib_event;
- struct mlxsw_sp_fib_event *fib_event;
- int last_family = AF_UNSPEC;
- LIST_HEAD(fib_event_queue);
-
- spin_lock_bh(&router->fib_event_queue_lock);
- list_splice_init(&router->fib_event_queue, &fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
-
- /* Router lock is held here to make sure per-instance
- * operation context is not used in between FIB4/6 events
- * processing.
- */
- mutex_lock(&router->lock);
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- list_for_each_entry_safe(fib_event, next_fib_event,
- &fib_event_queue, list) {
- /* Check if the next entry in the queue exists and it is
- * of the same type (family and event) as the current one.
- * In that case it is permitted to do the bulking
- * of multiple FIB entries to a single register write.
- */
- op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
- fib_event->family == next_fib_event->family &&
- fib_event->event == next_fib_event->event;
- op_ctx->event = fib_event->event;
-
- /* In case family of this and the previous entry are different, context
- * reinitialization is going to be needed now, indicate that.
- * Note that since last_family is initialized to AF_UNSPEC, this is always
- * going to happen for the first entry processed in the work.
- */
- if (fib_event->family != last_family)
- op_ctx->initialized = false;
-
- switch (fib_event->family) {
- case AF_INET:
- mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case AF_INET6:
- mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case RTNL_FAMILY_IP6MR:
- case RTNL_FAMILY_IPMR:
- /* Unlock here as inside FIBMR the lock is taken again
- * under RTNL. The per-instance operation context
- * is not used by FIBMR.
- */
- mutex_unlock(&router->lock);
- mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
- fib_event);
- mutex_lock(&router->lock);
- break;
- default:
- WARN_ON_ONCE(1);
- }
- last_family = fib_event->family;
- kfree(fib_event);
- cond_resched();
- }
- WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- mutex_unlock(&router->lock);
-}
-
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_event->fen_info = *fen_info;
+ fib_work->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while event is queued. Release it afterwards.
+ * freed while work is queued. Release it afterwards.
*/
- fib_info_hold(fib_event->fen_info.fi);
+ fib_info_hold(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_event->fnh_info = *fnh_info;
- fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
- fen6_info);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
if (err)
return err;
break;
@@ -7873,20 +7558,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
- mr_cache_hold(fib_event->men_info.mfc);
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
- dev_hold(fib_event->ven_info.dev);
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
@@ -7940,7 +7625,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event *fib_event;
+ struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -7972,39 +7657,37 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
}
- fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
- if (!fib_event)
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
return NOTIFY_BAD;
- fib_event->mlxsw_sp = router->mlxsw_sp;
- fib_event->event = event;
- fib_event->family = info->family;
+ fib_work->mlxsw_sp = router->mlxsw_sp;
+ fib_work->event = event;
switch (info->family) {
case AF_INET:
- mlxsw_sp_router_fib4_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
+ mlxsw_sp_router_fib4_event(fib_work, info);
break;
case AF_INET6:
- err = mlxsw_sp_router_fib6_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- mlxsw_sp_router_fibmr_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
break;
}
- /* Enqueue the event and trigger the work */
- spin_lock_bh(&router->fib_event_queue_lock);
- list_add_tail(&fib_event->list, &router->fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
- mlxsw_core_schedule_work(&router->fib_event_work);
+ mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_event);
+ kfree(fib_work);
return NOTIFY_BAD;
}
@@ -8463,6 +8146,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counters_alloc(rif);
}
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return rif;
err_stats_enable:
@@ -8492,6 +8176,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *vr;
int i;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
@@ -8650,6 +8335,13 @@ static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
+static u64 mlxsw_sp_rifs_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rifs_count);
+}
+
static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
struct netlink_ext_ack *extack)
@@ -8898,9 +8590,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(port_dev) ||
- netif_is_lag_port(port_dev) ||
- netif_is_ovs_port(port_dev))
+ if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -9624,17 +9314,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
- rif_subport->vid);
-
+ efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9659,9 +9350,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9673,7 +9370,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9697,10 +9394,9 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
.fid_get = mlxsw_sp_rif_subport_fid_get,
};
-static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
- enum mlxsw_reg_ritr_if_type type,
- u16 vid_fid, bool enable)
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9708,7 +9404,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
- mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9732,10 +9428,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return err;
rif->mac_profile_id = mac_profile;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
if (err)
- goto err_rif_vlan_fid_op;
+ goto err_rif_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9752,9 +9447,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9762,8 +9463,8 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-err_rif_vlan_fid_op:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9774,7 +9475,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9782,7 +9483,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
@@ -9859,11 +9560,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9938,7 +9747,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9981,6 +9790,7 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
if (err)
goto ul_rif_op_err;
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return ul_rif;
ul_rif_op_err:
@@ -9993,6 +9803,7 @@ static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
kfree(ul_rif);
@@ -10124,7 +9935,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
@@ -10148,10 +9959,15 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- mlxsw_sp_rif_mac_profiles_occ_get,
- mlxsw_sp);
+ atomic_set(&mlxsw_sp->router->rifs_count, 0);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIFS,
+ mlxsw_sp_rifs_occ_get,
+ mlxsw_sp);
return 0;
}
@@ -10161,11 +9977,13 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
@@ -10546,46 +10364,6 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
- .init = mlxsw_sp_router_ll_basic_init,
- .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
- .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
-};
-
-static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
-{
- size_t max_size = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
- size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
-
- if (size > max_size)
- max_size = size;
- }
- router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
- GFP_KERNEL);
- if (!router->ll_op_ctx)
- return -ENOMEM;
- INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
- return 0;
-}
-
-static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
-{
- WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- kfree(router->ll_op_ctx);
-}
-
static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
u16 lb_rif_index;
@@ -10659,23 +10437,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_router_ops_init;
- err = mlxsw_sp_router_xm_init(mlxsw_sp);
- if (err)
- goto err_xm_init;
-
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
- &mlxsw_sp_router_ll_xm_ops :
- &mlxsw_sp_router_ll_basic_ops;
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
-
- err = mlxsw_sp_router_ll_op_ctx_init(router);
- if (err)
- goto err_ll_op_ctx_init;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
mlxsw_sp_nh_grp_activity_work);
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -10728,10 +10492,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
- INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
- INIT_LIST_HEAD(&router->fib_event_queue);
- spin_lock_init(&router->fib_event_queue_lock);
-
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -10786,7 +10546,6 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -10810,10 +10569,6 @@ err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(router);
-err_ll_op_ctx_init:
- mlxsw_sp_router_xm_fini(mlxsw_sp);
-err_xm_init:
err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
@@ -10832,7 +10587,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -10844,8 +10598,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
- mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 37411b74c3e6..c5dfb972b433 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,32 +15,12 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
-struct mlxsw_sp_fib_entry_op_ctx {
- u8 bulk_ok:1, /* Indicate to the low-level op it is ok to bulk
- * the actual entry with the next one
- * in the queue.
- */
- initialized:1; /* Bit that the low-level op sets in case
- * the context priv is initialized.
- */
- struct list_head fib_entry_priv_list;
- unsigned long event;
- unsigned long ll_priv[];
-};
-
-static inline void
-mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
- memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
struct idr rif_mac_profiles_idr;
atomic_t rif_mac_profiles_count;
+ atomic_t rifs_count;
u8 max_rif_mac_profile;
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
@@ -72,14 +52,8 @@ struct mlxsw_sp_router {
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
- struct work_struct fib_event_work;
- struct list_head fib_event_queue;
- spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
- /* One set of ops for each protocol: IPv4 and IPv6 */
- const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
u16 lb_rif_index;
- struct mlxsw_sp_router_xm *xm;
const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
size_t adj_grp_size_ranges_count;
struct delayed_work nh_grp_activity_dw;
@@ -89,48 +63,6 @@ struct mlxsw_sp_router {
u32 adj_trap_index;
};
-struct mlxsw_sp_fib_entry_priv {
- refcount_t refcnt;
- struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
- unsigned long priv[];
-};
-
-enum mlxsw_sp_fib_entry_op {
- MLXSW_SP_FIB_ENTRY_OP_WRITE,
- MLXSW_SP_FIB_ENTRY_OP_UPDATE,
- MLXSW_SP_FIB_ENTRY_OP_DELETE,
-};
-
-/* Low-level router ops. Basically this is to handle the different
- * register sets to work with ordinary and XM trees and FIB entries.
- */
-struct mlxsw_sp_router_ll_ops {
- int (*init)(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto);
- int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
- int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
- int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
- size_t fib_entry_op_ctx_size;
- size_t fib_entry_priv_size;
- void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len, unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv);
- void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size);
- void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif);
- void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
- void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr);
- int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk);
- bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -150,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
-u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
@@ -232,10 +163,4 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
-extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp);
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp);
-
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
deleted file mode 100644
index d213af723a2a..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
+++ /dev/null
@@ -1,812 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/rhashtable.h>
-
-#include "spectrum.h"
-#include "core.h"
-#include "reg.h"
-#include "spectrum_router.h"
-
-#define MLXSW_SP_ROUTER_XM_M_VAL 16
-
-static const u8 mlxsw_sp_router_xm_m_val[] = {
- [MLXSW_SP_L3_PROTO_IPV4] = MLXSW_SP_ROUTER_XM_M_VAL,
- [MLXSW_SP_L3_PROTO_IPV6] = 0, /* Currently unused. */
-};
-
-#define MLXSW_SP_ROUTER_XM_L_VAL_MAX 16
-
-struct mlxsw_sp_router_xm {
- bool ipv4_supported;
- bool ipv6_supported;
- unsigned int entries_size;
- struct rhashtable ltable_ht;
- struct rhashtable flush_ht; /* Stores items about to be flushed from cache */
- unsigned int flush_count;
- bool flush_all_mode;
-};
-
-struct mlxsw_sp_router_xm_ltable_node {
- struct rhash_head ht_node; /* Member of router_xm->ltable_ht */
- u16 mindex;
- u8 current_lvalue;
- refcount_t refcnt;
- unsigned int lvalue_ref[MLXSW_SP_ROUTER_XM_L_VAL_MAX + 1];
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_ltable_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, mindex),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, ht_node),
- .key_len = sizeof(u16),
- .automatic_shrinking = true,
-};
-
-struct mlxsw_sp_router_xm_flush_info {
- bool all;
- enum mlxsw_sp_l3proto proto;
- u16 virtual_router;
- u8 prefix_len;
- unsigned char addr[sizeof(struct in6_addr)];
-};
-
-struct mlxsw_sp_router_xm_fib_entry {
- bool committed;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node; /* Parent node */
- u16 mindex; /* Store for processing from commit op */
- u8 lvalue;
- struct mlxsw_sp_router_xm_flush_info flush_info;
-};
-
-#define MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX \
- (MLXSW_REG_XMDR_TRANS_LEN / MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN)
-
-struct mlxsw_sp_fib_entry_op_ctx_xm {
- bool initialized;
- char xmdr_pl[MLXSW_REG_XMDR_LEN];
- unsigned int trans_offset; /* Offset of the current command within one
- * transaction of XMDR register.
- */
- unsigned int trans_item_len; /* The current command length. This is used
- * to advance 'trans_offset' when the next
- * command is appended.
- */
- unsigned int entries_count;
- struct mlxsw_sp_router_xm_fib_entry *entries[MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX];
-};
-
-static int mlxsw_sp_router_ll_xm_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- char rxlte_pl[MLXSW_REG_RXLTE_LEN];
-
- mlxsw_reg_rxlte_pack(rxlte_pl, vr_id,
- (enum mlxsw_reg_rxlte_protocol) proto, true);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta), xralta_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralst), xralst_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xraltb), xraltb_pl);
-}
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get4(const u32 addr)
-{
- /* Currently the M-index is set to linear mode. That means it is defined
- * as the 16 MSBs of the IP address.
- */
- return addr >> MLXSW_SP_ROUTER_XM_L_VAL_MAX;
-}
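A worked example of the linear M-index mapping (illustrative address only):

	/* With the M-index defined as the 16 MSBs of the destination,
	 * 192.0.2.1 == 0xC0000201, so
	 * mlxsw_sp_router_ll_xm_mindex_get4(0xC0000201) == 0xC000 and every
	 * route inside 192.0.0.0/16 maps to the same L-table node.
	 */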
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get6(const unsigned char *addr)
-{
- WARN_ON_ONCE(1);
- return 0; /* currently unused */
-}
-
-static void mlxsw_sp_router_ll_xm_op_ctx_check_init(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- if (op_ctx->initialized)
- return;
- op_ctx->initialized = true;
-
- mlxsw_reg_xmdr_pack(op_ctx_xm->xmdr_pl, true);
- op_ctx_xm->trans_offset = 0;
- op_ctx_xm->entries_count = 0;
-}
-
-static void mlxsw_sp_router_ll_xm_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- enum mlxsw_reg_xmdr_c_ltr_op xmdr_c_ltr_op;
- unsigned int len;
-
- mlxsw_sp_router_ll_xm_op_ctx_check_init(op_ctx, op_ctx_xm);
-
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_UPDATE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
-
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- len = mlxsw_reg_xmdr_c_ltr_pack4(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, (u32 *) addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get4(*((u32 *) addr));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- len = mlxsw_reg_xmdr_c_ltr_pack6(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get6(addr);
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
- if (!op_ctx_xm->trans_offset)
- op_ctx_xm->trans_item_len = len;
- else
- WARN_ON_ONCE(op_ctx_xm->trans_item_len != len);
-
- op_ctx_xm->entries[op_ctx_xm->entries_count] = fib_entry;
-
- fib_entry->lvalue = prefix_len > mlxsw_sp_router_xm_m_val[proto] ?
- prefix_len - mlxsw_sp_router_xm_m_val[proto] : 0;
-
- flush_info = &fib_entry->flush_info;
- flush_info->proto = proto;
- flush_info->virtual_router = virtual_router;
- flush_info->prefix_len = prefix_len;
- if (addr)
- memcpy(flush_info->addr, addr, sizeof(flush_info->addr));
- else
- memset(flush_info->addr, 0, sizeof(flush_info->addr));
-}
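A worked example of the L-value computed at the end of the pack helper above (values follow from MLXSW_SP_ROUTER_XM_M_VAL == 16):

	/* For IPv4, lvalue = max(prefix_len - 16, 0): a /24 route gets
	 * lvalue == 8, a /32 host route gets 16, and any prefix of /16 or
	 * shorter gets 0.  The L-table node of the route's M-index keeps the
	 * maximum of these values (see the ltable helpers below).
	 */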
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_remote_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_local_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- tunnel_ptr);
-}
-
-static struct mlxsw_sp_router_xm_ltable_node *
-mlxsw_sp_router_xm_ltable_node_get(struct mlxsw_sp_router_xm *router_xm, u16 mindex)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- int err;
-
- ltable_node = rhashtable_lookup_fast(&router_xm->ltable_ht, &mindex,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (ltable_node) {
- refcount_inc(&ltable_node->refcnt);
- return ltable_node;
- }
- ltable_node = kzalloc(sizeof(*ltable_node), GFP_KERNEL);
- if (!ltable_node)
- return ERR_PTR(-ENOMEM);
- ltable_node->mindex = mindex;
- refcount_set(&ltable_node->refcnt, 1);
-
- err = rhashtable_insert_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_insert;
-
- return ltable_node;
-
-err_insert:
- kfree(ltable_node);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_router_xm_ltable_node_put(struct mlxsw_sp_router_xm *router_xm,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- if (!refcount_dec_and_test(&ltable_node->refcnt))
- return;
- rhashtable_remove_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- kfree(ltable_node);
-}
-
-static int mlxsw_sp_router_xm_ltable_lvalue_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- char xrmt_pl[MLXSW_REG_XRMT_LEN];
-
- mlxsw_reg_xrmt_pack(xrmt_pl, ltable_node->mindex, ltable_node->current_lvalue);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xrmt), xrmt_pl);
-}
-
-struct mlxsw_sp_router_xm_flush_node {
- struct rhash_head ht_node; /* Member of router_xm->flush_ht */
- struct list_head list;
- struct mlxsw_sp_router_xm_flush_info flush_info;
- struct delayed_work dw;
- struct mlxsw_sp *mlxsw_sp;
- unsigned long start_jiffies;
- unsigned int reuses; /* Number of flush calls that reused this node. */
- refcount_t refcnt;
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_flush_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, flush_info),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, ht_node),
- .key_len = sizeof(struct mlxsw_sp_router_xm_flush_info),
- .automatic_shrinking = true,
-};
-
-static struct mlxsw_sp_router_xm_flush_node *
-mlxsw_sp_router_xm_cache_flush_node_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- int err;
-
- flush_node = kzalloc(sizeof(*flush_node), GFP_KERNEL);
- if (!flush_node)
- return ERR_PTR(-ENOMEM);
-
- flush_node->flush_info = *flush_info;
- err = rhashtable_insert_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- if (err) {
- kfree(flush_node);
- return ERR_PTR(err);
- }
- router_xm->flush_count++;
- flush_node->mlxsw_sp = mlxsw_sp;
- flush_node->start_jiffies = jiffies;
- refcount_set(&flush_node->refcnt, 1);
- return flush_node;
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_hold(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node)
- return;
- refcount_inc(&flush_node->refcnt);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_put(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node || !refcount_dec_and_test(&flush_node->refcnt))
- return;
- kfree(flush_node);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- router_xm->flush_count--;
- rhashtable_remove_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
-}
-
-static u32 mlxsw_sp_router_xm_flush_mask4(u8 prefix_len)
-{
- return GENMASK(31, 32 - prefix_len);
-}
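A worked example of the IPv4 flush mask above:

	/* mlxsw_sp_router_xm_flush_mask4(24) == GENMASK(31, 8) == 0xFFFFFF00,
	 * so the scheduled cache flush matches every destination sharing the
	 * route's first 24 bits.
	 */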
-
-static unsigned char *mlxsw_sp_router_xm_flush_mask6(u8 prefix_len)
-{
- static unsigned char mask[sizeof(struct in6_addr)];
-
- memset(mask, 0, sizeof(mask));
- memset(mask, 0xff, prefix_len / 8);
- mask[prefix_len / 8] = GENMASK(8, 8 - prefix_len % 8);
- return mask;
-}
-
-#define MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT 15
-#define MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES 15
-#define MLXSW_SP_ROUTER_XM_CACHE_DELAY 50 /* usecs */
-#define MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT (MLXSW_SP_ROUTER_XM_CACHE_DELAY * 10)
-
-static void mlxsw_sp_router_xm_cache_flush_work(struct work_struct *work)
-{
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- char rlcmld_pl[MLXSW_REG_RLCMLD_LEN];
- enum mlxsw_reg_rlcmld_select select;
- struct mlxsw_sp *mlxsw_sp;
- u32 addr4;
- int err;
-
- flush_node = container_of(work, struct mlxsw_sp_router_xm_flush_node,
- dw.work);
- mlxsw_sp = flush_node->mlxsw_sp;
- flush_info = &flush_node->flush_info;
-
- if (flush_info->all) {
- char rlpmce_pl[MLXSW_REG_RLPMCE_LEN];
-
- mlxsw_reg_rlpmce_pack(rlpmce_pl, true, false);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlpmce),
- rlpmce_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
- if (flush_node->reuses <
- MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES)
- /* Leaving flush-all mode. */
- mlxsw_sp->router->xm->flush_all_mode = false;
- goto out;
- }
-
- select = MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES;
-
- switch (flush_info->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- addr4 = *((u32 *) flush_info->addr);
- addr4 &= mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len);
-
- /* In case the flush prefix length is bigger than M-value,
- * it makes no sense to flush M entries. So just flush
- * the ML entries.
- */
- if (flush_info->prefix_len > MLXSW_SP_ROUTER_XM_M_VAL)
- select = MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES;
-
- mlxsw_reg_rlcmld_pack4(rlcmld_pl, select,
- flush_info->virtual_router, addr4,
- mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rlcmld_pack6(rlcmld_pl, select,
- flush_info->virtual_router, flush_info->addr,
- mlxsw_sp_router_xm_flush_mask6(flush_info->prefix_len));
- break;
- default:
- WARN_ON(true);
- goto out;
- }
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
-out:
- mlxsw_sp_router_xm_cache_flush_node_destroy(mlxsw_sp, flush_node);
-}
-
-static bool
-mlxsw_sp_router_xm_cache_flush_may_cancel(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- unsigned long max_wait = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT);
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
-
- /* In case the same flushing work is already pending, check if we can
- * consolidate with it; consolidation is allowed for up to MAX_WAIT.
- * Cancel the delayed work; if it was still pending, it can be reused.
- */
- if (time_is_before_jiffies(flush_node->start_jiffies + max_wait - delay) &&
- cancel_delayed_work_sync(&flush_node->dw))
- return true;
- return false;
-}
-
-static int
-mlxsw_sp_router_xm_cache_flush_schedule(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
- struct mlxsw_sp_router_xm_flush_info flush_all_info = {.all = true};
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
-
- /* Check if the queued number of flushes reached a critical amount after
- * which it is better to just flush the whole cache.
- */
- if (router_xm->flush_count == MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT)
- /* Entering flush-all mode. */
- router_xm->flush_all_mode = true;
-
- if (router_xm->flush_all_mode)
- flush_info = &flush_all_info;
-
- rcu_read_lock();
- flush_node = rhashtable_lookup_fast(&router_xm->flush_ht, flush_info,
- mlxsw_sp_router_xm_flush_ht_params);
- /* Take a reference so the object is not freed before possible
- * delayed work cancel could be done.
- */
- mlxsw_sp_router_xm_cache_flush_node_hold(flush_node);
- rcu_read_unlock();
-
- if (flush_node && mlxsw_sp_router_xm_cache_flush_may_cancel(flush_node)) {
- flush_node->reuses++;
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- /* The original work was within the wait period and was canceled.
- * That means the reference is still held and the
- * flush_node_put() call above did not free the flush_node.
- * Reschedule it with a fresh delay.
- */
- goto schedule_work;
- } else {
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- }
-
- flush_node = mlxsw_sp_router_xm_cache_flush_node_create(mlxsw_sp, flush_info);
- if (IS_ERR(flush_node))
- return PTR_ERR(flush_node);
- INIT_DELAYED_WORK(&flush_node->dw, mlxsw_sp_router_xm_cache_flush_work);
-
-schedule_work:
- mlxsw_core_schedule_dw(&flush_node->dw, delay);
- return 0;
-}
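The constants used above combine as follows (a summary of the logic in the two functions above):

	/* Each flush is deferred by CACHE_DELAY (50 usecs) so that a burst of
	 * FIB updates collapses into a single RLCMLD write, and a new request
	 * may be consolidated with an already-pending one for up to
	 * CACHE_MAX_WAIT (500 usecs).  Once 15 flushes are queued in parallel
	 * (PARALLEL_FLUSHES_LIMIT) the driver switches to flushing the whole
	 * cache via RLPMCE, and leaves that mode again once a flush-all work
	 * item is reused fewer than FLUSH_ALL_MIN_REUSES (15) times.
	 */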
-
-static int
-mlxsw_sp_router_xm_ml_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- u8 lvalue = fib_entry->lvalue;
- int err;
-
- ltable_node = mlxsw_sp_router_xm_ltable_node_get(router_xm,
- fib_entry->mindex);
- if (IS_ERR(ltable_node))
- return PTR_ERR(ltable_node);
- if (lvalue > ltable_node->current_lvalue) {
- /* The L-value is bigger than the one currently set, update. */
- ltable_node->current_lvalue = lvalue;
- err = mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp,
- ltable_node);
- if (err)
- goto err_lvalue_set;
-
- /* The L value for prefix/M is increased.
- * Therefore, all entries in M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
-
- ltable_node->lvalue_ref[lvalue]++;
- fib_entry->ltable_node = ltable_node;
-
- return 0;
-
-err_lvalue_set:
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node =
- fib_entry->ltable_node;
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- u8 lvalue = fib_entry->lvalue;
-
- ltable_node->lvalue_ref[lvalue]--;
- if (lvalue == ltable_node->current_lvalue && lvalue &&
- !ltable_node->lvalue_ref[lvalue]) {
- u8 new_lvalue = lvalue - 1;
-
- /* Find the biggest L-value left out there. */
- while (new_lvalue > 0 && !ltable_node->lvalue_ref[new_lvalue])
- new_lvalue--;
-
- ltable_node->current_lvalue = new_lvalue;
- mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, ltable_node);
-
- /* The L value for prefix/M is decreased.
- * Therefore, all entries in M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
-}
-
-static int
-mlxsw_sp_router_xm_ml_entries_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_ml_entry_add(mlxsw_sp, fib_entry);
- if (err)
- goto rollback;
- }
- return 0;
-
-rollback:
- for (i--; i >= 0; i--) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_cache_flush(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_cache_flush_schedule(mlxsw_sp,
- &fib_entry->flush_info);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
- }
-}
-
-static int mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- u8 num_rec;
- int err;
- int i;
-
- op_ctx_xm->trans_offset += op_ctx_xm->trans_item_len;
- op_ctx_xm->entries_count++;
-
- /* Check if bulking is possible and there is still room for another
- * FIB entry record. The size of 'trans_item_len' is either the size of an
- * IPv4 command or the size of an IPv6 command; the two cannot be mixed in
- * a single XMDR write.
- */
- if (op_ctx->bulk_ok &&
- op_ctx_xm->trans_offset + op_ctx_xm->trans_item_len <= MLXSW_REG_XMDR_TRANS_LEN) {
- if (postponed_for_bulk)
- *postponed_for_bulk = true;
- return 0;
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_REPLACE) {
- /* The L-table is updated inside. It has to be done before
- * the prefix is inserted.
- */
- err = mlxsw_sp_router_xm_ml_entries_add(mlxsw_sp, op_ctx_xm);
- if (err)
- goto out;
- }
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr), op_ctx_xm->xmdr_pl);
- if (err)
- goto out;
- num_rec = mlxsw_reg_xmdr_num_rec_get(op_ctx_xm->xmdr_pl);
- if (num_rec > op_ctx_xm->entries_count) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XMDR number of records\n");
- err = -EIO;
- goto out;
- }
- for (i = 0; i < num_rec; i++) {
- if (!mlxsw_reg_xmdr_reply_vect_get(op_ctx_xm->xmdr_pl, i)) {
- dev_err(mlxsw_sp->bus_info->dev, "Command send over XMDR failed\n");
- err = -EIO;
- goto out;
- } else {
- fib_entry = op_ctx_xm->entries[i];
- fib_entry->committed = true;
- }
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_DEL)
- /* The L-table is updated inside. It has to be done after
- * the prefix was removed.
- */
- mlxsw_sp_router_xm_ml_entries_del(mlxsw_sp, op_ctx_xm);
-
- /* At the very end, do the XLT cache flushing to evict stale
- * M and ML cache entries after prefixes were inserted/removed.
- */
- mlxsw_sp_router_xm_ml_entries_cache_flush(mlxsw_sp, op_ctx_xm);
-
-out:
- /* The next pack call is going to reinitialize the context. */
- op_ctx->initialized = false;
- return err;
-}
-
-static bool mlxsw_sp_router_ll_xm_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
-
- return fib_entry->committed;
-}
-
-const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = {
- .init = mlxsw_sp_router_ll_xm_init,
- .ralta_write = mlxsw_sp_router_ll_xm_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_xm_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_xm_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_xm),
- .fib_entry_priv_size = sizeof(struct mlxsw_sp_router_xm_fib_entry),
- .fib_entry_pack = mlxsw_sp_router_ll_xm_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_xm_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_xm_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_xm_fib_entry_is_committed,
-};
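The vtable above was consumed through the per-protocol proto_ll_ops[] pointers that spectrum_router.c also drops in this patch; an illustration of the dispatch (caller shape only, not an exact quote of the removed caller):

/* Illustrative dispatch:
 *
 *	const struct mlxsw_sp_router_ll_ops *ll_ops =
 *		mlxsw_sp->router->proto_ll_ops[proto];
 *
 *	ll_ops->fib_entry_pack(op_ctx, proto, op, virtual_router,
 *			       prefix_len, addr, priv);
 *	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif);
 *	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
 */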
-
-#define MLXSW_SP_ROUTER_XM_MINDEX_SIZE (64 * 1024)
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm;
- char rxltm_pl[MLXSW_REG_RXLTM_LEN];
- char xltq_pl[MLXSW_REG_XLTQ_LEN];
- u32 mindex_size;
- u16 device_id;
- int err;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return 0;
-
- router_xm = kzalloc(sizeof(*router_xm), GFP_KERNEL);
- if (!router_xm)
- return -ENOMEM;
-
- mlxsw_reg_xltq_pack(xltq_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(xltq), xltq_pl);
- if (err)
- goto err_xltq_query;
- mlxsw_reg_xltq_unpack(xltq_pl, &device_id, &router_xm->ipv4_supported,
- &router_xm->ipv6_supported, &router_xm->entries_size, &mindex_size);
-
- if (device_id != MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XM device id\n");
- err = -EINVAL;
- goto err_device_id_check;
- }
-
- if (mindex_size != MLXSW_SP_ROUTER_XM_MINDEX_SIZE) {
- dev_err(mlxsw_sp->bus_info->dev, "Unexpected M-index size\n");
- err = -EINVAL;
- goto err_mindex_size_check;
- }
-
- mlxsw_reg_rxltm_pack(rxltm_pl, mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV4],
- mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV6]);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl);
- if (err)
- goto err_rxltm_write;
-
- err = rhashtable_init(&router_xm->ltable_ht, &mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_ltable_ht_init;
-
- err = rhashtable_init(&router_xm->flush_ht, &mlxsw_sp_router_xm_flush_ht_params);
- if (err)
- goto err_flush_ht_init;
-
- mlxsw_sp->router->xm = router_xm;
- return 0;
-
-err_flush_ht_init:
- rhashtable_destroy(&router_xm->ltable_ht);
-err_ltable_ht_init:
-err_rxltm_write:
-err_mindex_size_check:
-err_device_id_check:
-err_xltq_query:
- kfree(router_xm);
- return err;
-}
-
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return;
-
- rhashtable_destroy(&router_xm->flush_ht);
- rhashtable_destroy(&router_xm->ltable_ht);
- kfree(router_xm);
-}
-
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- return router_xm && router_xm->ipv4_supported;
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index fe663b0ab708..39904dacf4f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -106,8 +106,8 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_init;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
- mlxsw_sp_span_occ_get, mlxsw_sp);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
@@ -123,7 +123,7 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
cancel_work_sync(&mlxsw_sp->span->work);
- devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
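The devl_* helpers used in this file differ from their devlink_* counterparts only in locking; a sketch of the assumed contract (where exactly the lock is taken in this series is an assumption, not shown here):

	/* devl_resource_occ_get_register()/unregister() require the devlink
	 * instance lock to be held by the caller, conceptually:
	 *
	 *	devl_lock(devlink);
	 *	devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
	 *				       mlxsw_sp_span_occ_get, mlxsw_sp);
	 *	devl_unlock(devlink);
	 *
	 * Presumably mlxsw_core takes the lock around the init/fini paths, so
	 * no explicit locking appears in the hunks above.
	 */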
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a6d2e806cba9..4efccd942fb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -48,7 +48,8 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
- struct list_head mids_list;
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
u8 vlan_enabled:1,
multicast_enabled:1,
mrouter:1;
@@ -102,6 +103,33 @@ struct mlxsw_sp_switchdev_ops {
void (*init)(struct mlxsw_sp *mlxsw_sp);
};
+struct mlxsw_sp_mdb_entry_key {
+ unsigned char addr[ETH_ALEN];
+ u16 fid;
+};
+
+struct mlxsw_sp_mdb_entry {
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mdb_entry_key key;
+ u16 mid;
+ struct list_head ports_list;
+ u16 ports_count;
+};
+
+struct mlxsw_sp_mdb_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+ refcount_t refcount;
+ bool mrouter;
+};
+
+static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
+};
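The lookup helper using these parameters is not part of this hunk; a minimal sketch, with a hypothetical helper name, keyed on the {MAC, FID} pair defined above:

static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mdb_entry_lookup(struct mlxsw_sp_bridge_device *bridge_device,
			  const unsigned char *addr, u16 fid)
{
	struct mlxsw_sp_mdb_entry_key key = {};

	ether_addr_copy(key.addr, addr);
	key.fid = fid;
	return rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
				      mlxsw_sp_mdb_ht_params);
}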
+
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -109,12 +137,13 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port);
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device
- *bridge_device);
+ *bridge_device, bool mc_enabled);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -237,6 +266,10 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
if (!bridge_device)
return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_mdb_rhashtable_init;
+
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
@@ -254,7 +287,8 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
- INIT_LIST_HEAD(&bridge_device->mids_list);
+ INIT_LIST_HEAD(&bridge_device->mdb_list);
+
if (list_empty(&bridge->bridges_list))
mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
list_add(&bridge_device->list, &bridge->bridges_list);
@@ -273,6 +307,8 @@ err_vxlan_init:
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
+ rhashtable_destroy(&bridge_device->mdb_ht);
+err_mdb_rhashtable_init:
kfree(bridge_device);
return ERR_PTR(err);
}
@@ -290,7 +326,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
- WARN_ON(!list_empty(&bridge_device->mids_list));
+ WARN_ON(!list_empty(&bridge_device->mdb_list));
+ rhashtable_destroy(&bridge_device->mdb_ht);
kfree(bridge_device);
}
@@ -643,6 +680,64 @@ err_port_bridge_vlan_flood_set:
}
static int
+mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type, local_port, member);
+ if (err)
+ goto err_fid_flood_set;
+ }
+
+ return 0;
+
+err_fid_flood_set:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &bridge_vlan->port_vlan_list,
+ list) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
+ local_port, !member);
+ }
+
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ member);
+ if (err)
+ goto err_bridge_vlans_flood_set;
+ }
+
+ return 0;
+
+err_bridge_vlans_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ !member);
+ return err;
+}
+
+static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
bool set)
@@ -813,6 +908,9 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
+
if (!bridge_port->bridge_device->multicast_enabled)
goto out;
@@ -822,8 +920,6 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
- is_port_mrouter);
out:
bridge_port->mrouter = is_port_mrouter;
return 0;
@@ -842,6 +938,7 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
bool mc_disabled)
{
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -854,43 +951,184 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
- if (bridge_device->multicast_enabled != !mc_disabled) {
- bridge_device->multicast_enabled = !mc_disabled;
- mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
- bridge_device);
- }
+ if (bridge_device->multicast_enabled == !mc_disabled)
+ return 0;
+
+ bridge_device->multicast_enabled = !mc_disabled;
+ err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ !mc_disabled);
+ if (err)
+ goto err_mc_enable_sync;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
- enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
bool member = mlxsw_sp_mc_flood(bridge_port);
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
- bridge_port,
- packet_type, member);
+ err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
+ packet_type,
+ member);
if (err)
- return err;
+ goto err_flood_table_set;
}
- bridge_device->multicast_enabled = !mc_disabled;
-
return 0;
+
+err_flood_table_set:
+ list_for_each_entry_continue_reverse(bridge_port,
+ &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
+ !member);
+ }
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ mc_disabled);
+err_mc_enable_sync:
+ bridge_device->multicast_enabled = mc_disabled;
+ return err;
+}
+
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
+ if (mdb_entry_port->local_port == local_port)
+ return mdb_entry_port;
+ }
+
+ return NULL;
}
-static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
- u16 mid_idx, bool add)
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
{
- char *smid2_pl;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
int err;
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count++;
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+ mdb_entry->ports_count++;
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port, bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count--;
+ return;
+ }
+
+ mdb_entry->ports_count--;
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
+
+static __always_unused struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (!mdb_entry_port->mrouter)
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ mdb_entry_port->mrouter = true;
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static __always_unused void
+mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!mdb_entry_port->mrouter)
+ return;
+
+ mdb_entry_port->mrouter = false;
+ if (!refcount_dec_and_test(&mdb_entry_port->refcount))
+ return;
+
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
}
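A short summary of the reference-count rules implemented by the helpers above:

/* Each successful mlxsw_sp_mdb_entry_port_get() takes one reference on the
 * mlxsw_sp_mdb_entry_port and the mrouter helpers add at most one more.
 * ports_count tracks only ports holding a non-mrouter reference, which is
 * why the get/put pair adjusts it when a port that so far only had the
 * mrouter reference gains its first, or loses its last, real membership.
 */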
static void
@@ -898,10 +1136,17 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device,
bool add)
{
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- list_for_each_entry(mid, &bridge_device->mids_list, list)
- mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
}
static int
@@ -1127,14 +1372,13 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last_port, last_vlan;
+ bool last_port;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
- last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
last_port = list_is_singular(&bridge_vlan->port_vlan_list);
@@ -1146,8 +1390,9 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
- if (last_vlan)
- mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
+ mlxsw_sp_fid_index(fid));
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
@@ -1436,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
+ const char *mac, u16 fid, u16 vid,
+ bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
@@ -1449,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
+ local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1464,18 +1711,18 @@ out:
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+ const char *mac, u16 fid, u16 vid,
+ bool adding, bool dynamic)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
+ adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
@@ -1537,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
- fdb_info->addr, fid_index,
+ fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
@@ -1546,8 +1793,9 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
vid, adding, false);
}
-static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid_idx, bool adding)
+static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mdb_entry *mdb_entry,
+ bool adding)
{
char *sfd_pl;
u8 num_rec;
@@ -1558,8 +1806,9 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
+ mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1573,79 +1822,17 @@ out:
return err;
}
-static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
- long *ports_bitmap,
- bool set_router_port)
-{
- char *smid2_pl;
- int err, i;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
- }
-
- mlxsw_reg_smid2_port_mask_set(smid2_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
-
- for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
-
- mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 mid_idx, bool add)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid2_pl;
- int err;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
-{
- struct mlxsw_sp_mid *mid;
-
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
- return mid;
- }
- return NULL;
-}
-
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
- unsigned long *ports_bitmap)
+ struct mlxsw_sp_ports_bitmap *ports_bm)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u64 max_lag_members, i;
int lag_id;
if (!bridge_port->lagged) {
- set_bit(bridge_port->system_port, ports_bitmap);
+ set_bit(bridge_port->system_port, ports_bm->bitmap);
} else {
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
@@ -1655,13 +1842,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
lag_id, i);
if (mlxsw_sp_port)
set_bit(mlxsw_sp_port->local_port,
- ports_bitmap);
+ ports_bm->bitmap);
}
}
}
static void
-mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp *mlxsw_sp)
{
@@ -1671,116 +1858,226 @@ mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
if (bridge_port->mrouter) {
mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
bridge_port,
- flood_bitmap);
+ flood_bm);
}
}
}
-static bool
-mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid,
- struct mlxsw_sp_bridge_device *bridge_device)
+static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ unsigned int nbits = ports_bm->nbits;
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, nbits) {
+ mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
+ mdb_entry,
+ i);
+ if (IS_ERR(mdb_entry_port)) {
+ nbits = i;
+ goto err_mrouter_port_get;
+ }
+ }
+
+ return 0;
+
+err_mrouter_port_get:
+ for_each_set_bit(i, ports_bm->bitmap, nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+ return PTR_ERR(mdb_entry_port);
+}
+
+static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+}
+
+static int
+mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
- long *flood_bitmap;
- int num_of_ports;
- u16 mid_idx;
+ struct mlxsw_sp_ports_bitmap ports_bm;
int err;
- mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
- MLXSW_SP_MID_MAX);
- if (mid_idx == MLXSW_SP_MID_MAX)
- return false;
+ err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
+ if (err)
+ return err;
- num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
- if (!flood_bitmap)
- return false;
+ mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
+
+ if (add)
+ err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
+ mdb_entry);
+ else
+ mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
+
+ mlxsw_sp_port_bitmap_fini(&ports_bm);
+ return err;
+}
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
- mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
- mid->mid = mid_idx;
- err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
- bridge_device->mrouter);
- bitmap_free(flood_bitmap);
+ ether_addr_copy(mdb_entry->key.addr, addr);
+ mdb_entry->key.fid = fid;
+ err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
if (err)
- return false;
+ goto err_pgt_mid_alloc;
+
+ INIT_LIST_HEAD(&mdb_entry->ports_list);
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
- true);
+ err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
+ true);
if (err)
- return false;
+ goto err_mdb_mrouters_set;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = true;
- return true;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port)) {
+ err = PTR_ERR(mdb_entry_port);
+ goto err_mdb_entry_port_get;
+ }
+
+ if (bridge_device->multicast_enabled) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+
+ err = rhashtable_insert_fast(&bridge_device->mdb_ht,
+ &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
+
+ return mdb_entry;
+
+err_rhashtable_insert:
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+err_mdb_entry_write:
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
+err_mdb_entry_port_get:
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+err_mdb_mrouters_set:
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+err_pgt_mid_alloc:
+ kfree(mdb_entry);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ u16 local_port, bool force)
{
- if (!mid->in_hw)
- return 0;
-
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = false;
- return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
- false);
+ list_del(&mdb_entry->list);
+ rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+ WARN_ON(!list_empty(&mdb_entry->ports_list));
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+ kfree(mdb_entry);
}
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- mid = kzalloc(sizeof(*mid), GFP_KERNEL);
- if (!mid)
- return NULL;
+ ether_addr_copy(key.addr, addr);
+ key.fid = fid;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (mdb_entry) {
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
- GFP_KERNEL);
- if (!mid->ports_in_mid)
- goto err_ports_in_mid_alloc;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
+ mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port))
+ return ERR_CAST(mdb_entry_port);
- ether_addr_copy(mid->addr, addr);
- mid->fid = fid;
- mid->in_hw = false;
+ return mdb_entry;
+ }
- if (!bridge_device->multicast_enabled)
- goto out;
+ return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
+ local_port);
+}
- if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
- goto err_write_mdb_entry;
+static bool
+mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_mdb_entry_port *removed_entry_port,
+ bool force)
+{
+ if (mdb_entry->ports_count > 1)
+ return false;
-out:
- list_add_tail(&mid->list, &bridge_device->mids_list);
- return mid;
+ if (force)
+ return true;
-err_write_mdb_entry:
- bitmap_free(mid->ports_in_mid);
-err_ports_in_mid_alloc:
- kfree(mid);
- return NULL;
+ if (!removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 1)
+ return false;
+
+ if (removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 2)
+ return false;
+
+ return true;
}
-static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
+ bool force)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err = 0;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
- if (bitmap_empty(mid->ports_in_mid,
- mlxsw_core_max_ports(mlxsw_sp->core))) {
- err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
- list_del(&mid->list);
- bitmap_free(mid->ports_in_mid);
- kfree(mid);
- }
- return err;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ /* Avoid a temporary situation in which the MDB entry points to an empty
+ * PGT entry, as otherwise packets will be temporarily dropped instead
+ * of being flooded. Instead, in this situation, call
+ * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
+ * then releases the PGT entry.
+ */
+ if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
+ mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
+ local_port, force);
+ else
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
+ force);
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1789,12 +2086,10 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1809,54 +2104,35 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
- fid_index);
- if (!mid) {
- netdev_err(dev, "Unable to allocate MC group\n");
- return -ENOMEM;
- }
- }
- set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
-
- if (!bridge_device->multicast_enabled)
- return 0;
-
- if (bridge_port->mrouter)
- return 0;
-
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set SMID\n");
- goto err_out;
- }
+ mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
+ mdb->addr, fid_index,
+ mlxsw_sp_port->local_port);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
return 0;
-
-err_out:
- mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- return err;
}
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_device
- *bridge_device)
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool mc_enabled)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_mid *mid;
- bool mc_enabled;
-
- mc_enabled = bridge_device->multicast_enabled;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (mc_enabled)
- mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
- bridge_device);
- else
- mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
+ if (err)
+ goto err_mdb_entry_write;
}
+ return 0;
+
+err_mdb_entry_write:
+ list_for_each_entry_continue_reverse(mdb_entry,
+ &bridge_device->mdb_list, list)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
+ return err;
}
static void
@@ -1864,14 +2140,20 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
bool add)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
}
}
@@ -1949,28 +2231,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int
-__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_mid *mid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (bridge_port->bridge_device->multicast_enabled &&
- !bridge_port->mrouter) {
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
- }
-
- err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
-
- return err;
-}
-
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1980,7 +2240,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
@@ -1996,32 +2257,44 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
+ ether_addr_copy(key.addr, mdb->addr);
+ key.fid = fid_index;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (!mdb_entry) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ mlxsw_sp_port->local_port, false);
+ return 0;
}
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid, *tmp;
+ struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
+ u16 local_port = mlxsw_sp_port->local_port;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
- if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
- __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
- mid);
- } else if (bridge_device->multicast_enabled &&
- bridge_port->mrouter) {
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- }
+ list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
+ list) {
+ if (mdb_entry->key.fid != fid_index)
+ continue;
+
+ if (bridge_port->mrouter)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
+ mdb_entry,
+ local_port);
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ local_port, true);
}
}
@@ -2633,10 +2906,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u16 local_port;
- u16 vid, fid;
bool do_notification = true;
int err;
@@ -2667,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
- err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
@@ -2729,8 +3002,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
- mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index ed4d0d3448f3..f4bfdb6dab9c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -953,16 +953,16 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPBC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPUC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
@@ -1298,8 +1298,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->policers_count; i++) {
policer_item = &trap->policer_items_arr[i];
- err = devlink_trap_policers_register(devlink,
- &policer_item->policer, 1);
+ err = devl_trap_policers_register(devlink,
+ &policer_item->policer, 1);
if (err)
goto err_trap_policer_register;
}
@@ -1309,8 +1309,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
err_trap_policer_register:
for (i--; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
return err;
@@ -1325,8 +1325,8 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
for (i = trap->policers_count - 1; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
@@ -1381,8 +1381,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->groups_count; i++) {
group_item = &trap->group_items_arr[i];
- err = devlink_trap_groups_register(devlink, &group_item->group,
- 1);
+ err = devl_trap_groups_register(devlink, &group_item->group, 1);
if (err)
goto err_trap_group_register;
}
@@ -1392,7 +1391,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
err_trap_group_register:
for (i--; i >= 0; i--) {
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
return err;
@@ -1408,7 +1407,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_group_item *group_item;
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
}
@@ -1469,8 +1468,8 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->traps_count; i++) {
trap_item = &trap->trap_items_arr[i];
- err = devlink_traps_register(devlink, &trap_item->trap, 1,
- mlxsw_sp);
+ err = devl_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
if (err)
goto err_trap_register;
}
@@ -1480,7 +1479,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_trap_register:
for (i--; i >= 0; i--) {
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
return err;
@@ -1496,7 +1495,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_item *trap_item;
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index d888498aed33..8da169663bda 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -27,8 +27,6 @@ enum {
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
- MLXSW_TRAP_ID_ARPBC = 0x50,
- MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -71,6 +69,8 @@ enum {
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
+ MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index c8fe8b31f07b..b1c74e6cb012 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -155,8 +155,8 @@ static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
return 0;
}
-static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
- u16 timeout)
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+ u16 timeout)
{
u16 timeout_cnt = 0;
u32 val;
@@ -192,7 +192,7 @@ static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
return 0;
}
-static void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
{
u32 val;
@@ -1149,7 +1149,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (adapter->is_pci11x1x)
+ wol->supported |= WAKE_MAGICSECURE;
+
wol->wolopts |= adapter->wolopts;
+ if (adapter->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, adapter->sopass, sizeof(wol->sopass));
}
static int lan743x_ethtool_set_wol(struct net_device *netdev,
@@ -1170,6 +1175,13 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
adapter->wolopts |= WAKE_PHY;
if (wol->wolopts & WAKE_ARP)
adapter->wolopts |= WAKE_ARP;
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ wol->wolopts & WAKE_MAGIC) {
+ memcpy(adapter->sopass, wol->sopass, sizeof(wol->sopass));
+ adapter->wolopts |= WAKE_MAGICSECURE;
+ } else {
+ memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ }
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
@@ -1178,6 +1190,49 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
}
#endif /* CONFIG_PM */
+static void lan743x_common_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct lan743x_adapter *adapter = netdev_priv(dev);
+ u32 *rb = p;
+
+ memset(p, 0, (MAX_LAN743X_ETH_REGS * sizeof(u32)));
+
+ rb[ETH_PRIV_FLAGS] = adapter->flags;
+ rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV);
+ rb[ETH_FPGA_REV] = lan743x_csr_read(adapter, FPGA_REV);
+ rb[ETH_STRAP_READ] = lan743x_csr_read(adapter, STRAP_READ);
+ rb[ETH_INT_STS] = lan743x_csr_read(adapter, INT_STS);
+ rb[ETH_HW_CFG] = lan743x_csr_read(adapter, HW_CFG);
+ rb[ETH_PMT_CTL] = lan743x_csr_read(adapter, PMT_CTL);
+ rb[ETH_E2P_CMD] = lan743x_csr_read(adapter, E2P_CMD);
+ rb[ETH_E2P_DATA] = lan743x_csr_read(adapter, E2P_DATA);
+ rb[ETH_MAC_CR] = lan743x_csr_read(adapter, MAC_CR);
+ rb[ETH_MAC_RX] = lan743x_csr_read(adapter, MAC_RX);
+ rb[ETH_MAC_TX] = lan743x_csr_read(adapter, MAC_TX);
+ rb[ETH_FLOW] = lan743x_csr_read(adapter, MAC_FLOW);
+ rb[ETH_MII_ACC] = lan743x_csr_read(adapter, MAC_MII_ACC);
+ rb[ETH_MII_DATA] = lan743x_csr_read(adapter, MAC_MII_DATA);
+ rb[ETH_EEE_TX_LPI_REQ_DLY] = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ rb[ETH_WUCSR] = lan743x_csr_read(adapter, MAC_WUCSR);
+ rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC);
+}
+
+static int lan743x_get_regs_len(struct net_device *dev)
+{
+ return MAX_LAN743X_ETH_REGS * sizeof(u32);
+}
+
+static void lan743x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ regs->version = LAN743X_ETH_REG_VERSION;
+
+ lan743x_common_regs(dev, regs, p);
+}
+
const struct ethtool_ops lan743x_ethtool_ops = {
.get_drvinfo = lan743x_ethtool_get_drvinfo,
.get_msglevel = lan743x_ethtool_get_msglevel,
@@ -1202,6 +1257,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.set_eee = lan743x_ethtool_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = lan743x_get_regs_len,
+ .get_regs = lan743x_get_regs,
#ifdef CONFIG_PM
.get_wol = lan743x_ethtool_get_wol,
.set_wol = lan743x_ethtool_set_wol,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
index d0d11a777a58..7f5996a52488 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.h
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -6,6 +6,32 @@
#include "linux/ethtool.h"
+#define LAN743X_ETH_REG_VERSION 1
+
+enum {
+ ETH_PRIV_FLAGS,
+ ETH_ID_REV,
+ ETH_FPGA_REV,
+ ETH_STRAP_READ,
+ ETH_INT_STS,
+ ETH_HW_CFG,
+ ETH_PMT_CTL,
+ ETH_E2P_CMD,
+ ETH_E2P_DATA,
+ ETH_MAC_CR,
+ ETH_MAC_RX,
+ ETH_MAC_TX,
+ ETH_FLOW,
+ ETH_MII_ACC,
+ ETH_MII_DATA,
+ ETH_EEE_TX_LPI_REQ_DLY,
+ ETH_WUCSR,
+ ETH_WK_SRC,
+
+ /* Add new registers above */
+ MAX_LAN743X_ETH_REGS
+};
+
extern const struct ethtool_ops lan743x_ethtool_ops;
#endif /* _LAN743X_ETHTOOL_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index af81236b4b4e..a9a1dea6d731 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -22,20 +22,36 @@
#define MMD_ACCESS_WRITE 1
#define MMD_ACCESS_READ 2
#define MMD_ACCESS_READ_INC 3
+#define PCS_POWER_STATE_DOWN 0x6
+#define PCS_POWER_STATE_UP 0x4
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
u32 chip_rev;
+ u32 cfg_load;
+ u32 hw_cfg;
u32 strap;
+ int ret;
+
+ /* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
+ ret = lan743x_hs_syslock_acquire(adapter, 100);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Sys Lock acquire failed ret:%d\n", ret);
+ return;
+ }
- strap = lan743x_csr_read(adapter, STRAP_READ);
- if (strap & STRAP_READ_USE_SGMII_EN_) {
+ cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
+ lan743x_hs_syslock_release(adapter);
+ hw_cfg = lan743x_csr_read(adapter, HW_CFG);
+
+ if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
+ hw_cfg & HW_CFG_RST_PROTECT_) {
+ strap = lan743x_csr_read(adapter, STRAP_READ);
if (strap & STRAP_READ_SGMII_EN_)
adapter->is_sgmii_en = true;
else
adapter->is_sgmii_en = false;
- netif_dbg(adapter, drv, adapter->netdev,
- "STRAP_READ: 0x%08X\n", strap);
} else {
chip_rev = lan743x_csr_read(adapter, FPGA_REV);
if (chip_rev) {
@@ -43,12 +59,12 @@ static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
adapter->is_sgmii_en = true;
else
adapter->is_sgmii_en = false;
- netif_dbg(adapter, drv, adapter->netdev,
- "FPGA_REV: 0x%08X\n", chip_rev);
} else {
adapter->is_sgmii_en = false;
}
}
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
}
static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
@@ -909,6 +925,318 @@ static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
return ret;
}
+static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
+{
+ u32 data;
+ int ret;
+
+ ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
+ !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "%s: error %d sgmii wait timeout\n", __func__, ret);
+
+ return ret;
+}
+
+static int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
+{
+ u32 mmd_access;
+ int ret;
+ u32 val;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should <= 31\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ goto sgmii_unlock;
+
+ val = lan743x_csr_read(adapter, SGMII_DATA);
+ ret = (int)(val & SGMII_DATA_MASK_);
+
+sgmii_unlock:
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
+ u8 mmd, u16 addr, u16 val)
+{
+ u32 mmd_access;
+ int ret;
+
+ if (mmd > 31) {
+ netif_err(adapter, probe, adapter->netdev,
+ "%s mmd should <= 31\n", __func__);
+ return -EINVAL;
+ }
+ mutex_lock(&adapter->sgmii_rw_lock);
+ /* Load Register Data */
+ lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
+ /* Load Register Address */
+ mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
+ mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
+ lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
+ ret = lan743x_sgmii_wait_till_not_busy(adapter);
+ mutex_unlock(&adapter->sgmii_rw_lock);
+
+ return ret;
+}
+
+static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
+ u16 baud)
+{
+ int mpllctrl0;
+ int mpllctrl1;
+ int miscctrl1;
+ int ret;
+
+ mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0);
+ if (mpllctrl0 < 0)
+ return mpllctrl0;
+
+ mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
+ if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
+ /* mpll_baud_clk/4 */
+ miscctrl1 = 0xA;
+ } else {
+ mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
+ /* mpll_baud_clk/2 */
+ miscctrl1 = 0x5;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
+}
+
+static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
+ bool enable)
+{
+ if (enable)
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_3P125GBPS);
+ else
+ return lan743x_sgmii_mpll_set(adapter,
+ VR_MII_BAUD_RATE_1P25GBPS);
+}
+
+static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
+ bool *status)
+{
+ int ret;
+
+ ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_GEN2_4_MPLL_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
+ ret == VR_MII_MPLL_MULTIPLIER_50)
+ *status = true;
+ else
+ *status = false;
+
+ return 0;
+}
+
+static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+{
+ enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
+ int mii_ctrl;
+ int dgt_ctrl;
+ int an_ctrl;
+ int ret;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
+ /* Switch to 2.5 Gbps */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
+ else
+ /* Switch to 10/100/1000 Mbps clock */
+ ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
+ if (ret < 0)
+ return ret;
+
+ /* Enable SGMII Auto NEG */
+ mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctrl < 0)
+ return mii_ctrl;
+
+ an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
+ if (an_ctrl < 0)
+ return an_ctrl;
+
+ dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1);
+ if (dgt_ctrl < 0)
+ return dgt_ctrl;
+
+ if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
+ mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
+ mii_ctrl |= BMCR_SPEED1000;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ /* In order for Auto-Negotiation to operate properly at
+ * 2.5 Gbps, the 1.6 ms link timer values must be adjusted:
+ * the VR_MII_LINK_TIMER_CTRL register must be set to
+ * 16'h7A1 and the CL37_TMR_OVR_RIDE bit of the
+ * VR_MII_DIG_CTRL1 register set to 1.
+ */
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_LINK_TIMER_CTRL, 0x7A1);
+ if (ret < 0)
+ return ret;
+ } else {
+ mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
+ dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
+ dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
+ }
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
+ mii_ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_CTRL1, dgt_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
+ VR_MII_AN_CTRL, an_ctrl);
+}
+
+static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
+{
+ u8 wait_cnt = 0;
+ u32 dig_sts;
+
+ do {
+ dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
+ VR_MII_DIG_STS);
+ if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
+ VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
+ break;
+ usleep_range(1000, 2000);
+ } while (wait_cnt++ < 10);
+
+ if (wait_cnt >= 10)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phy_device *phydev = netdev->phydev;
+ enum lan743x_sgmii_lsd lsd = POWER_DOWN;
+ int mii_ctl;
+ bool status;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_2500_MASTER;
+ else
+ lsd = LINK_2500_SLAVE;
+ break;
+ case SPEED_1000:
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
+ lsd = LINK_1000_MASTER;
+ else
+ lsd = LINK_1000_SLAVE;
+ break;
+ case SPEED_100:
+ if (phydev->duplex)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (phydev->duplex)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "Invalid speed %d\n", phydev->speed);
+ return -EINVAL;
+ }
+
+ adapter->sgmii_lsd = lsd;
+ ret = lan743x_sgmii_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d SGMII cfg failed\n", ret);
+ return ret;
+ }
+
+ ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "erro %d SGMII get mode failed\n", ret);
+ return ret;
+ }
+
+ if (status)
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 2.5G mode enable\n");
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII 1G mode enable\n");
+
+ /* SGMII/1000/2500BASE-X PCS power down */
+ mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
+ if (mii_ctl < 0)
+ return mii_ctl;
+
+ mii_ctl |= BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
+ if (ret < 0)
+ return ret;
+
+ /* SGMII/1000/2500BASE-X PCS power up */
+ mii_ctl &= ~BMCR_PDOWN;
+ ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
u8 *addr)
{
@@ -1124,6 +1452,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data |= MAC_CR_CFG_H_;
data &= ~MAC_CR_CFG_L_;
break;
+ case SPEED_2500:
+ data |= MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
}
lan743x_csr_write(adapter, MAC_CR, data);
@@ -1135,6 +1467,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
lan743x_phy_update_flowcontrol(adapter, local_advertisement,
remote_advertisement);
lan743x_ptp_update_latency(adapter, phydev->speed);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
+ lan743x_sgmii_config(adapter);
}
}
@@ -2875,6 +3211,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
pci11x1x_strap_get_status(adapter);
spin_lock_init(&adapter->eth_syslock_spinlock);
+ mutex_init(&adapter->sgmii_rw_lock);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3124,6 +3461,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
int mask_index;
+ u32 sopass;
u32 pmtctl;
u32 wucsr;
u32 macrx;
@@ -3218,6 +3556,14 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
}
+ if (adapter->wolopts & WAKE_MAGICSECURE) {
+ sopass = *(u32 *)adapter->sopass;
+ lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
+ sopass = *(u16 *)&adapter->sopass[4];
+ lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
+ wucsr |= MAC_MP_SO_EN_;
+ }
+
lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
lan743x_csr_write(adapter, PMT_CTL, pmtctl);
lan743x_csr_write(adapter, MAC_RX, macrx);
@@ -3228,6 +3574,7 @@ static int lan743x_pm_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
lan743x_pcidev_shutdown(pdev);
@@ -3239,6 +3586,18 @@ static int lan743x_pm_suspend(struct device *dev)
if (adapter->wolopts)
lan743x_pm_set_wol(adapter);
+ if (adapter->is_pci11x1x) {
+ /* Save HW_CFG to config again in PM resume */
+ data = lan743x_csr_read(adapter, HW_CFG);
+ adapter->hw_cfg = data;
+ data |= (HW_CFG_RST_PROTECT_PCIE_ |
+ HW_CFG_D3_RESET_DIS_ |
+ HW_CFG_D3_VAUX_OVR_ |
+ HW_CFG_HOT_RESET_DIS_ |
+ HW_CFG_RST_PROTECT_);
+ lan743x_csr_write(adapter, HW_CFG, data);
+ }
+
/* Host sets PME_En, put D3hot */
return pci_prepare_to_sleep(pdev);
}
@@ -3254,6 +3613,10 @@ static int lan743x_pm_resume(struct device *dev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ /* Restore HW_CFG that was saved during pm suspend */
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret) {
netif_err(adapter, probe, adapter->netdev,
@@ -3270,6 +3633,9 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_info(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 1ca5f3216403..72adae4f2aa0 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -43,6 +43,11 @@
#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
#define HW_CFG (0x010)
+#define HW_CFG_RST_PROTECT_PCIE_ BIT(19)
+#define HW_CFG_HOT_RESET_DIS_ BIT(15)
+#define HW_CFG_D3_VAUX_OVR_ BIT(14)
+#define HW_CFG_D3_RESET_DIS_ BIT(13)
+#define HW_CFG_RST_PROTECT_ BIT(12)
#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
#define HW_CFG_EE_OTP_RELOAD_ BIT(4)
#define HW_CFG_LRST_ BIT(1)
@@ -92,6 +97,11 @@
#define CONFIG_REG_ADDR_BASE (0x0000)
#define ETH_EEPROM_REG_ADDR_BASE (0x0E00)
#define ETH_OTP_REG_ADDR_BASE (0x1000)
+#define GEN_SYS_CONFIG_LOAD_STARTED_REG (0x0078)
+#define ETH_SYS_CONFIG_LOAD_STARTED_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ GEN_SYS_CONFIG_LOAD_STARTED_REG)
+#define GEN_SYS_LOAD_STARTED_REG_ETH_ BIT(4)
#define SYS_LOCK_REG (0x00A0)
#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7)
#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5)
@@ -214,6 +224,7 @@
#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
#define MAC_WUCSR (0x140)
+#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
@@ -221,6 +232,8 @@
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_MP_SO_HI (0x148)
+#define MAC_MP_SO_LO (0x14C)
#define MAC_WUF_CFG0 (0x150)
#define MAC_NUM_OF_WUF_CFG (32)
@@ -280,11 +293,82 @@
#define MAC_WUCSR2 (0x600)
+#define SGMII_ACC (0x720)
+#define SGMII_ACC_SGMII_BZY_ BIT(31)
+#define SGMII_ACC_SGMII_WR_ BIT(30)
+#define SGMII_ACC_SGMII_MMD_SHIFT_ (16)
+#define SGMII_ACC_SGMII_MMD_MASK_ GENMASK(20, 16)
+#define SGMII_ACC_SGMII_MMD_VSR_ BIT(15)
+#define SGMII_ACC_SGMII_ADDR_SHIFT_ (0)
+#define SGMII_ACC_SGMII_ADDR_MASK_ GENMASK(15, 0)
+#define SGMII_DATA (0x724)
+#define SGMII_DATA_SHIFT_ (0)
+#define SGMII_DATA_MASK_ GENMASK(15, 0)
#define SGMII_CTL (0x728)
#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+/* Vendor Specific SGMII MMD details */
+#define SR_VSMMD_PCS_ID1 0x0004
+#define SR_VSMMD_PCS_ID2 0x0005
+#define SR_VSMMD_STS 0x0008
+#define SR_VSMMD_CTRL 0x0009
+
+#define VR_MII_DIG_CTRL1 0x8000
+#define VR_MII_DIG_CTRL1_VR_RST_ BIT(15)
+#define VR_MII_DIG_CTRL1_R2TLBE_ BIT(14)
+#define VR_MII_DIG_CTRL1_EN_VSMMD1_ BIT(13)
+#define VR_MII_DIG_CTRL1_CS_EN_ BIT(10)
+#define VR_MII_DIG_CTRL1_MAC_AUTO_SW_ BIT(9)
+#define VR_MII_DIG_CTRL1_INIT_ BIT(8)
+#define VR_MII_DIG_CTRL1_DTXLANED_0_ BIT(4)
+#define VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_ BIT(3)
+#define VR_MII_DIG_CTRL1_EN_2_5G_MODE_ BIT(2)
+#define VR_MII_DIG_CTRL1_BYP_PWRUP_ BIT(1)
+#define VR_MII_DIG_CTRL1_PHY_MODE_CTRL_ BIT(0)
+#define VR_MII_AN_CTRL 0x8001
+#define VR_MII_AN_CTRL_MII_CTRL_ BIT(8)
+#define VR_MII_AN_CTRL_SGMII_LINK_STS_ BIT(4)
+#define VR_MII_AN_CTRL_TX_CONFIG_ BIT(3)
+#define VR_MII_AN_CTRL_1000BASE_X_ (0)
+#define VR_MII_AN_CTRL_SGMII_MODE_ (2)
+#define VR_MII_AN_CTRL_QSGMII_MODE_ (3)
+#define VR_MII_AN_CTRL_PCS_MODE_SHIFT_ (1)
+#define VR_MII_AN_CTRL_PCS_MODE_MASK_ GENMASK(2, 1)
+#define VR_MII_AN_CTRL_MII_AN_INTR_EN_ BIT(0)
+#define VR_MII_AN_INTR_STS 0x8002
+#define VR_MII_AN_INTR_STS_LINK_UP_ BIT(4)
+#define VR_MII_AN_INTR_STS_SPEED_MASK_ GENMASK(3, 2)
+#define VR_MII_AN_INTR_STS_1000_MBPS_ BIT(3)
+#define VR_MII_AN_INTR_STS_100_MBPS_ BIT(2)
+#define VR_MII_AN_INTR_STS_10_MBPS_ (0)
+#define VR_MII_AN_INTR_STS_FDX_ BIT(1)
+#define VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR_ BIT(0)
+
+#define VR_MII_LINK_TIMER_CTRL 0x800A
+#define VR_MII_DIG_STS 0x8010
+#define VR_MII_DIG_STS_PSEQ_STATE_MASK_ GENMASK(4, 2)
+#define VR_MII_DIG_STS_PSEQ_STATE_POS_ (2)
+#define VR_MII_GEN2_4_MPLL_CTRL0 0x8078
+#define VR_MII_MPLL_CTRL0_REF_CLK_DIV2_ BIT(12)
+#define VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_ BIT(4)
+#define VR_MII_GEN2_4_MPLL_CTRL1 0x8079
+#define VR_MII_MPLL_CTRL1_MPLL_MULTIPLIER_ GENMASK(6, 0)
+#define VR_MII_BAUD_RATE_3P125GBPS (3125)
+#define VR_MII_BAUD_RATE_1P25GBPS (1250)
+#define VR_MII_MPLL_MULTIPLIER_125 (125)
+#define VR_MII_MPLL_MULTIPLIER_100 (100)
+#define VR_MII_MPLL_MULTIPLIER_50 (50)
+#define VR_MII_MPLL_MULTIPLIER_40 (40)
+#define VR_MII_GEN2_4_MISC_CTRL1 0x809A
+#define VR_MII_CTRL1_RX_RATE_0_MASK_ GENMASK(3, 2)
+#define VR_MII_CTRL1_RX_RATE_0_SHIFT_ (2)
+#define VR_MII_CTRL1_TX_RATE_0_MASK_ GENMASK(1, 0)
+#define VR_MII_MPLL_BAUD_CLK (0)
+#define VR_MII_MPLL_BAUD_CLK_DIV_2 (1)
+#define VR_MII_MPLL_BAUD_CLK_DIV_4 (2)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
@@ -906,12 +990,28 @@ struct lan743x_rx {
struct sk_buff *skb_head, *skb_tail;
};
+/* SGMII Link Speed Duplex status */
+enum lan743x_sgmii_lsd {
+ POWER_DOWN = 0,
+ LINK_DOWN,
+ ANEG_BUSY,
+ LINK_10HD,
+ LINK_10FD,
+ LINK_100HD,
+ LINK_100FD,
+ LINK_1000_MASTER,
+ LINK_1000_SLAVE,
+ LINK_2500_MASTER,
+ LINK_2500_SLAVE
+};
+
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
@@ -931,12 +1031,16 @@ struct lan743x_adapter {
spinlock_t eth_syslock_spinlock;
bool eth_syslock_en;
u32 eth_syslock_acquire_cnt;
+ struct mutex sgmii_rw_lock;
+ /* SGMII Link Speed & Duplex status */
+ enum lan743x_sgmii_lsd sgmii_lsd;
u8 max_tx_channels;
u8 used_tx_channels;
u8 max_vector_count;
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
+ u32 hw_cfg;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1049,5 +1153,7 @@ struct lan743x_rx_buffer_info {
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter, u16 timeout);
+void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 5edc8b7176c8..ec07f7d0528c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -394,15 +394,13 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
struct sparx5 *spx5 = port->sparx5;
u16 pgid_idx, vid;
u32 mact_entry;
+ bool is_host;
int res, err;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
- if (netif_is_bridge_master(v->obj.orig_dev)) {
- sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
- return 0;
- }
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
/* When VLAN unaware the vlan value is not parsed and we receive vid 0.
* Fall back to bridge vid 1.
@@ -419,17 +417,33 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
/* MC_IDX starts after the port masks in the PGID table */
pgid_idx += SPX5_PORTS;
- sparx5_pgid_update_mask(port, pgid_idx, true);
+
+ if (is_host)
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid_idx));
+ else
+ sparx5_pgid_update_mask(port, pgid_idx, true);
+
} else {
err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
if (err) {
netdev_warn(dev, "multicast pgid table full\n");
return err;
}
- sparx5_pgid_update_mask(port, pgid_idx, true);
+
+ if (is_host)
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid_idx));
+ else
+ sparx5_pgid_update_mask(port, pgid_idx, true);
+
err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
+
if (err) {
netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
+ sparx5_pgid_free(spx5, pgid_idx);
sparx5_pgid_update_mask(port, pgid_idx, false);
return err;
}
@@ -466,17 +480,12 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
u16 pgid_idx, vid;
- u32 mact_entry, res, pgid_entry[3];
- int err;
+ u32 mact_entry, res, pgid_entry[3], misc_cfg;
+ bool host_ena;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
- if (netif_is_bridge_master(v->obj.orig_dev)) {
- sparx5_mact_forget(spx5, v->addr, v->vid);
- return 0;
- }
-
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
@@ -489,15 +498,21 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
/* MC_IDX starts after the port masks in the PGID table */
pgid_idx += SPX5_PORTS;
- sparx5_pgid_update_mask(port, pgid_idx, false);
+
+ if (netif_is_bridge_master(v->obj.orig_dev))
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid_idx));
+ else
+ sparx5_pgid_update_mask(port, pgid_idx, false);
+
+ misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
+ host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);
sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
- if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) {
- /* No ports are in MC group. Remove entry */
- err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
- if (err)
- return err;
- }
+ if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
+ /* No ports or CPU are in MC group. Remove entry */
+ return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
}
return 0;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 41ecd156e95f..4a6efe6ada08 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -348,6 +348,7 @@ struct gdma_context {
struct completion eq_test_event;
u32 test_event_eq_id;
+ bool is_pf;
void __iomem *bar0_va;
void __iomem *shm_base;
void __iomem *db_page_base;
@@ -469,6 +470,15 @@ struct gdma_eqe {
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18
+#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
+#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
+#define GDMA_PF_REG_SHM_OFF 0x70
+
+#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108
+
+#define MANA_PF_DEVICE_ID 0x00B9
+#define MANA_VF_DEVICE_ID 0x00BA
+
struct gdma_posted_wqe_info {
u32 wqe_size_in_bu;
};
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 49b85ca578b0..5f9240182351 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -18,7 +18,24 @@ static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
return readq(g->bar0_va + offset);
}
-static void mana_gd_init_registers(struct pci_dev *pdev)
+static void mana_gd_init_pf_regs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ void __iomem *sriov_base_va;
+ u64 sriov_base_off;
+
+ gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
+ gc->db_page_base = gc->bar0_va +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
+ sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
+
+ sriov_base_va = gc->bar0_va + sriov_base_off;
+ gc->shm_base = sriov_base_va +
+ mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
+}
+
+static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -30,6 +47,16 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}
+static void mana_gd_init_registers(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ if (gc->is_pf)
+ mana_gd_init_pf_regs(pdev);
+ else
+ mana_gd_init_vf_regs(pdev);
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1304,6 +1331,11 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_gd_remove_irqs(pdev);
}
+static bool mana_is_pf(unsigned short dev_id)
+{
+ return dev_id == MANA_PF_DEVICE_ID;
+}
+
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
@@ -1340,10 +1372,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bar0_va)
goto free_gc;
+ gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
-
err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
@@ -1438,7 +1470,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
#endif
static const struct pci_device_id mana_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
{ }
};
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 078d6a5a0768..543a5d5c304f 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -158,6 +158,14 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
hwc->rxq->msg_buf->gpa_mkey = val;
hwc->txq->msg_buf->gpa_mkey = val;
break;
+
+ case HWC_INIT_DATA_PF_DEST_RQ_ID:
+ hwc->pf_dest_vrq_id = val;
+ break;
+
+ case HWC_INIT_DATA_PF_DEST_CQ_ID:
+ hwc->pf_dest_vrcq_id = val;
+ break;
}
break;
@@ -773,10 +781,13 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
const void *req, u32 resp_len, void *resp)
{
+ struct gdma_context *gc = hwc->gdma_dev->gdma_context;
struct hwc_work_request *tx_wr;
struct hwc_wq *txq = hwc->txq;
struct gdma_req_hdr *req_msg;
struct hwc_caller_ctx *ctx;
+ u32 dest_vrcq = 0;
+ u32 dest_vrq = 0;
u16 msg_id;
int err;
@@ -803,7 +814,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
tx_wr->msg_size = req_len;
- err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
+ if (gc->is_pf) {
+ dest_vrq = hwc->pf_dest_vrq_id;
+ dest_vrcq = hwc->pf_dest_vrcq_id;
+ }
+
+ err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
if (err) {
dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h
index 31c6e83c454a..6a757a6e2732 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.h
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.h
@@ -20,6 +20,8 @@
#define HWC_INIT_DATA_MAX_NUM_CQS 7
#define HWC_INIT_DATA_PDID 8
#define HWC_INIT_DATA_GPA_MKEY 9
+#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
+#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -178,6 +180,9 @@ struct hw_channel_context {
struct semaphore sema;
struct gdma_resource inflight_msg_res;
+ u32 pf_dest_vrq_id;
+ u32 pf_dest_vrcq_id;
+
struct hwc_caller_ctx *caller_ctx;
};
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index d36405af9432..d58be64374c8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -53,12 +53,14 @@ struct mana_stats_rx {
u64 bytes;
u64 xdp_drop;
u64 xdp_tx;
+ u64 xdp_redirect;
struct u64_stats_sync syncp;
};
struct mana_stats_tx {
u64 packets;
u64 bytes;
+ u64 xdp_xmit;
struct u64_stats_sync syncp;
};
@@ -311,6 +313,8 @@ struct mana_rxq {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
struct page *xdp_save_page;
+ bool xdp_flush;
+ int xdp_rc; /* XDP redirect return code */
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
@@ -374,6 +378,7 @@ struct mana_port_context {
unsigned int num_queues;
mana_handle_t port_handle;
+ mana_handle_t pf_filter_handle;
u16 port_idx;
@@ -395,6 +400,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
@@ -420,6 +427,12 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+
+ /* Privileged commands for the PF mode */
+ MANA_REGISTER_FILTER = 0x28000,
+ MANA_DEREGISTER_FILTER = 0x28001,
+ MANA_REGISTER_HW_PORT = 0x28003,
+ MANA_DEREGISTER_HW_PORT = 0x28004,
};
/* Query Device Configuration */
@@ -547,6 +560,63 @@ struct mana_cfg_rx_steer_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Register HW vPort */
+struct mana_register_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ u16 attached_gfid;
+ u8 is_pf_default_vport;
+ u8 reserved1;
+ u8 allow_all_ether_types;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+}; /* HW DATA */
+
+struct mana_register_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+/* Deregister HW vPort */
+struct mana_deregister_hw_vport_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t hw_vport_handle;
+}; /* HW DATA */
+
+struct mana_deregister_hw_vport_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
+/* Register filter */
+struct mana_register_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u8 mac_addr[6];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+ u16 reserved5;
+ u32 reserved6;
+ u32 reserved7;
+ u32 reserved8;
+}; /* HW DATA */
+
+struct mana_register_filter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+/* Deregister filter */
+struct mana_deregister_filter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t filter_handle;
+}; /* HW DATA */
+
+struct mana_deregister_filter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW DATA */
+
#define MANA_MAX_NUM_QUEUES 64
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 1d2f948b5c00..421fd39ff3a8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -32,9 +32,55 @@ void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
}
+static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
+ u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_set_queue_mapping(skb, q_idx);
+
+ mana_xdp_tx(skb, ndev);
+
+ return 0;
+}
+
+int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct mana_stats_tx *tx_stats;
+ int i, count = 0;
+ u16 q_idx;
+
+ if (unlikely(!apc->port_is_up))
+ return 0;
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &apc->tx_qp[q_idx].txq.stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
+
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
+ struct mana_stats_rx *rx_stats;
struct bpf_prog *prog;
u32 act = XDP_PASS;
@@ -49,12 +95,30 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
act = bpf_prog_run_xdp(prog, xdp);
+ rx_stats = &rxq->stats;
+
switch (act) {
case XDP_PASS:
case XDP_TX:
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
+ if (!rxq->xdp_rc) {
+ rxq->xdp_flush = true;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+ rx_stats->xdp_redirect++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index b1d773823232..9259a74eca40 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -6,6 +6,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/filter.h>
#include <linux/mm.h>
#include <net/checksum.h>
@@ -382,6 +383,7 @@ static const struct net_device_ops mana_devops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
.ndo_bpf = mana_bpf,
+ .ndo_xdp_xmit = mana_xdp_xmit,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -446,6 +448,119 @@ static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
return 0;
}
+static int mana_pf_register_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_register_hw_vport_resp resp = {};
+ struct mana_register_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.attached_gfid = 1;
+ req.is_pf_default_vport = 1;
+ req.allow_all_ether_types = 1;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->port_handle = resp.hw_vport_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
+{
+ struct mana_deregister_hw_vport_resp resp = {};
+ struct mana_deregister_hw_vport_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(req), sizeof(resp));
+ req.hw_vport_handle = apc->port_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister hw vPort: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
+static int mana_pf_register_filter(struct mana_port_context *apc)
+{
+ struct mana_register_filter_resp resp = {};
+ struct mana_register_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+ return err ? err : -EPROTO;
+ }
+
+ apc->pf_filter_handle = resp.filter_handle;
+ return 0;
+}
+
+static void mana_pf_deregister_filter(struct mana_port_context *apc)
+{
+ struct mana_deregister_filter_resp resp = {};
+ struct mana_deregister_filter_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(req), sizeof(resp));
+ req.filter_handle = apc->pf_filter_handle;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+ return;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
+ sizeof(resp));
+ if (err || resp.hdr.status)
+ netdev_err(apc->ndev,
+ "Failed to deregister filter: %d, 0x%x\n",
+ err, resp.hdr.status);
+}
+
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
u16 *max_num_vports)
@@ -1007,6 +1122,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
+ if (act == XDP_REDIRECT && !rxq->xdp_rc)
+ return;
+
if (act != XDP_PASS && act != XDP_TX)
goto drop_xdp;
@@ -1162,11 +1280,14 @@ drop:
static void mana_poll_rx_cq(struct mana_cq *cq)
{
struct gdma_comp *comp = cq->gdma_comp_buf;
+ struct mana_rxq *rxq = cq->rxq;
int comp_read, i;
comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+ rxq->xdp_flush = false;
+
for (i = 0; i < comp_read; i++) {
if (WARN_ON_ONCE(comp[i].is_sq))
return;
@@ -1175,8 +1296,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
return;
- mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
+ mana_process_rx_cqe(rxq, cq, &comp[i]);
}
+
+ if (rxq->xdp_flush)
+ xdp_do_flush();
}
static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
@@ -1653,6 +1777,7 @@ out:
static void mana_destroy_vport(struct mana_port_context *apc)
{
+ struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_rxq *rxq;
u32 rxq_idx;
@@ -1666,6 +1791,9 @@ static void mana_destroy_vport(struct mana_port_context *apc)
}
mana_destroy_txq(apc);
+
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_hw_vport(apc);
}
static int mana_create_vport(struct mana_port_context *apc,
@@ -1676,6 +1804,12 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_hw_vport(apc);
+ if (err)
+ return err;
+ }
+
err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
if (err)
return err;
@@ -1755,6 +1889,7 @@ reset_apc:
int mana_alloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
int err;
err = mana_create_vport(apc, ndev);
@@ -1781,6 +1916,12 @@ int mana_alloc_queues(struct net_device *ndev)
if (err)
goto destroy_vport;
+ if (gd->gdma_context->is_pf) {
+ err = mana_pf_register_filter(apc);
+ if (err)
+ goto destroy_vport;
+ }
+
mana_chn_setxdp(apc, mana_xdp_get(apc));
return 0;
@@ -1825,6 +1966,7 @@ int mana_attach(struct net_device *ndev)
static int mana_dealloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_txq *txq;
int i, err;
@@ -1833,6 +1975,9 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
+ if (gd->gdma_context->is_pf)
+ mana_pf_deregister_filter(apc);
+
/* No packet can be transmitted now since apc->port_is_up is false.
* There is still a tiny chance that mana_poll_tx_cq() can re-enable
* a txq because it may not timely see apc->port_is_up being cleared
@@ -1915,6 +2060,7 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
apc->port_handle = INVALID_MANA_HANDLE;
+ apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
ndev->netdev_ops = &mana_devops;
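Taken together, the mana_en.c hunks give the PF-mode objects a clear lifecycle: mana_create_vport() registers the HW vPort before mana_cfg_vport(), mana_alloc_queues() registers the MAC filter only after the queues exist, and teardown runs in reverse, with mana_dealloc_queues() dropping the filter and mana_destroy_vport() deregistering the HW vPort; pf_filter_handle starts out as INVALID_MANA_HANDLE in mana_probe_port().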
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index e13f2453eabb..c530db76880f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -23,7 +23,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues * 6;
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -50,6 +50,8 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_%d_xdp_tx", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_redirect", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
@@ -57,6 +59,8 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "tx_%d_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_xdp_xmit", i);
+ p += ETH_GSTRING_LEN;
}
}
@@ -70,6 +74,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_redirect;
+ u64 xdp_xmit;
u64 xdp_drop;
u64 xdp_tx;
int q, i = 0;
@@ -89,12 +95,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
bytes = rx_stats->bytes;
xdp_drop = rx_stats->xdp_drop;
xdp_tx = rx_stats->xdp_tx;
+ xdp_redirect = rx_stats->xdp_redirect;
} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
data[i++] = xdp_tx;
+ data[i++] = xdp_redirect;
}
for (q = 0; q < num_queues; q++) {
@@ -104,10 +112,12 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
}
}
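The new per-queue stringset count of 8 in mana_get_sset_count() breaks down as five RX counters (packets, bytes, xdp_drop, xdp_tx and the new xdp_redirect) plus three TX counters (packets, bytes and the new xdp_xmit), matching the strings and values emitted in the hunks above.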
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 8da7e25a47c9..d4649e4ee0e7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -3367,6 +3367,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
+ mutex_init(&ocelot->tas_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 87ad2137ba06..09c703efe946 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -72,6 +72,10 @@ int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ if (ocelot->ops->tas_clock_adjust)
+ ocelot->ops->tas_clock_adjust(ocelot);
+
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_settime64);
@@ -105,6 +109,9 @@ int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ if (ocelot->ops->tas_clock_adjust)
+ ocelot->ops->tas_clock_adjust(ocelot);
} else {
/* Fall back using ocelot_ptp_settime64 which is not exact. */
struct timespec64 ts;
@@ -117,6 +124,7 @@ int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ocelot_ptp_settime64(ptp, &ts);
}
+
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_adjtime);
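The tas_clock_adjust hook is called after the PTP clock has been stepped, either directly in ocelot_ptp_settime64() or via the adjtime path, so a switch driver can recompute its time-aware-shaper schedule; the ocelot.c hunk above initializes the new tas_lock mutex that such code can use. A driver providing the hook would wire it through its ops structure roughly as in the sketch below; this is a sketch only, assuming the ops structure is struct ocelot_ops from include/soc/mscc/ocelot.h, and the function and struct names prefixed "my_switch" are hypothetical.

	#include <soc/mscc/ocelot.h>

	/* Sketch: re-derive the tc-taprio schedule after a clock step. */
	static void my_switch_tas_clock_adjust(struct ocelot *ocelot)
	{
		mutex_lock(&ocelot->tas_lock);
		/* recompute base times against the newly stepped PTP clock */
		mutex_unlock(&ocelot->tas_lock);
	}

	static const struct ocelot_ops my_switch_ops = {
		/* ... other callbacks omitted ... */
		.tas_clock_adjust = my_switch_tas_clock_adjust,
	};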
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 61497c3e4cfb..971dde8c3286 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2692,7 +2692,7 @@ again:
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
- cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cum_len = -skb_tcp_all_headers(skb);
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
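The myri10ge hunk replaces the open-coded header-length sum with the skb_tcp_all_headers() helper. Assuming the helper in include/linux/tcp.h, it is expected to be equivalent to the expression it replaces, roughly:

	/* Sketch of the assumed helper: total MAC + IP + TCP header length. */
	static inline int skb_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}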
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 50bca486a244..9aae7f1eb5d2 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
I. Board Compatibility
This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
-It also works with other chips in in the DP83810 series.
+It also works with other chips in the DP83810 series.
II. Board-specific settings
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 0c0d127906dd..09a89e72f904 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,28 +32,4 @@ config S2IO
To compile this driver as a module, choose M here. The module
will be called s2io.
-config VXGE
- tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
- depends on PCI
- help
- This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter. These were originally released from
- Neterion, which was later acquired by Exar. So, the adapters might be
- labeled as either one, depending on its age.
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/neterion/vxge.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called vxge.
-
-config VXGE_DEBUG_TRACE_ALL
- bool "Enabling All Debug trace statements in driver"
- default n
- depends on VXGE
- help
- Say Y here if you want to enabling all the debug trace statements in
- the vxge driver. By default only few debug trace statements are
- enabled.
-
endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
index 87ede8a47bb8..de98b4e6eff9 100644
--- a/drivers/net/ethernet/neterion/Makefile
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_S2IO) += s2io.o
-obj-$(CONFIG_VXGE) += vxge/
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6dd451adc331..30f955efa830 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2156,7 +2156,7 @@ static int verify_xena_quiescence(struct s2io_nic *sp)
/*
* In PCI 33 mode, the P_PLL is not used, and therefore,
- * the the P_PLL_LOCK bit in the adapter_status register will
+ * the P_PLL_LOCK bit in the adapter_status register will
* not be asserted.
*/
if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
@@ -3817,7 +3817,7 @@ static irqreturn_t s2io_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
struct pci_dev *pdev = sp->pdev;
@@ -5492,7 +5492,7 @@ s2io_ethtool_gringparam(struct net_device *dev,
}
/**
- * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
+ * s2io_ethtool_getpause_data -Pause frame generation and reception.
* @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
@@ -7449,7 +7449,7 @@ aggregate:
* @link : inidicates whether link is UP/DOWN.
* Description:
* This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is is down or up. This is called by the Alarm
+ * status of the NIC is down or up. This is called by the Alarm
* interrupt handler whenever a link change interrupt comes up.
* Return value:
* void.
@@ -7732,7 +7732,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
* Setting the device configuration parameters.
* Most of these parameters can be specified by the user during
* module insertion as they are module loadable parameters. If
- * these parameters are not not specified during load time, they
+ * these parameters are not specified during load time, they
* are initialized with default values.
*/
config = &sp->config;
diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
deleted file mode 100644
index 0820e81ca7fb..000000000000
--- a/drivers/net/ethernet/neterion/vxge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
-# Virtualized Server Adapter linux driver
-
-obj-$(CONFIG_VXGE) += vxge.o
-
-vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
deleted file mode 100644
index a3204a7ef750..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ /dev/null
@@ -1,5099 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/vmalloc.h>
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
- status = __vxge_hw_vpath_stats_access(vpath, \
- VXGE_HW_STATS_OP_READ, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-static void
-vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
-{
- u64 val64;
-
- val64 = readq(&vp_reg->rxmac_vcfg0);
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- writeq(val64, &vp_reg->rxmac_vcfg0);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-}
-
-/*
- * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
- */
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_virtualpath *vpath;
- u64 val64, rxd_count, rxd_spat;
- int count = 0, total_count = 0;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
-
- vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-
- /* Check that the ring controller for this vpath has enough free RxDs
- * to send frames to the host. This is done by reading the
- * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
- * RXD_SPAT value for the vpath.
- */
- val64 = readq(&vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
- /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
- * leg room.
- */
- rxd_spat *= 2;
-
- do {
- mdelay(1);
-
- rxd_count = readq(&vp_reg->prc_rxd_doorbell);
-
- /* Check that the ring controller for this vpath does
- * not have any frame in its pipeline.
- */
- val64 = readq(&vp_reg->frm_in_progress_cnt);
- if ((rxd_count <= rxd_spat) || (val64 > 0))
- count = 0;
- else
- count++;
- total_count++;
- } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
- (total_count < VXGE_HW_MAX_POLLING_COUNT));
-
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
- __func__);
-
- return total_count;
-}
-
-/* vxge_hw_device_wait_receive_idle - This function waits until all frames
- * stored in the frame buffer for each vpath assigned to the given
- * function (hldev) have been sent to the host.
- */
-void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
-{
- int i, total_count = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
- if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
- break;
- }
-}
-
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
- u64 val64;
- u32 i = 0;
-
- udelay(10);
-
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- udelay(100);
- } while (++i <= 9);
-
- i = 0;
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- mdelay(1);
- } while (++i <= max_millis);
-
- return VXGE_HW_FAIL;
-}
-
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- return __vxge_hw_device_register_poll(addr, mask, max_millis);
-}
-
-static enum vxge_hw_status
-vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
- u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
- u64 *steer_ctrl)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- enum vxge_hw_status status;
- u64 val64;
- u32 retry = 0, max_retry = 3;
-
- spin_lock(&vpath->lock);
- if (!vpath->vp_open) {
- spin_unlock(&vpath->lock);
- max_retry = 100;
- }
-
- writeq(*data0, &vp_reg->rts_access_steer_data0);
- writeq(*data1, &vp_reg->rts_access_steer_data1);
- wmb();
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- *steer_ctrl;
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- /* The __vxge_hw_device_register_poll can udelay for a significant
- * amount of time, blocking other process from the CPU. If it delays
- * for ~5secs, a NMI error can occur. A way around this is to give up
- * the processor via msleep, but this is not allowed is under lock.
- * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
- * 1sec and sleep for 10ms until the firmware operation has completed
- * or timed-out.
- */
- while ((status != VXGE_HW_OK) && retry++ < max_retry) {
- if (!vpath->vp_open)
- msleep(20);
- status = __vxge_hw_device_register_poll(
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- }
-
- if (status != VXGE_HW_OK)
- goto out;
-
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- *data0 = readq(&vp_reg->rts_access_steer_data0);
- *data1 = readq(&vp_reg->rts_access_steer_data1);
- *steer_ctrl = val64;
- } else
- status = VXGE_HW_FAIL;
-
-out:
- if (vpath->vp_open)
- spin_unlock(&vpath->lock);
- return status;
-}
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_READ,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- return status;
-}
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- u32 ret;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
- goto exit;
- }
-
- ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
- if (ret != 1) {
- vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
- __func__, ret);
- status = VXGE_HW_FAIL;
- }
-
-exit:
- return status;
-}
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int ret_code, sec_code;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- /* send upgrade start command */
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_START,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
- __func__);
- return status;
- }
-
- /* Transfer fw image to adapter 16 bytes at a time */
- for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
- steer_ctrl = 0;
-
- /* The next 128bits of fwdata to be loaded onto the adapter */
- data0 = *((u64 *)fwdata);
- data1 = *((u64 *)fwdata + 1);
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_UPGRADE_ACTION,
- VXGE_HW_FW_UPGRADE_MEMO,
- VXGE_HW_FW_UPGRADE_OFFSET_SEND,
- &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
- __func__);
- goto out;
- }
-
- ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
- switch (ret_code) {
- case VXGE_HW_FW_UPGRADE_OK:
- /* All OK, send next 16 bytes. */
- break;
- case VXGE_FW_UPGRADE_BYTES2SKIP:
- /* skip bytes in the stream */
- fwdata += (data0 >> 8) & 0xFFFFFFFF;
- break;
- case VXGE_HW_FW_UPGRADE_DONE:
- goto out;
- case VXGE_HW_FW_UPGRADE_ERR:
- sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
- switch (sec_code) {
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
- case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
- printk(KERN_ERR
- "corrupted data from .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
- case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
- printk(KERN_ERR "invalid .ncf file\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
- printk(KERN_ERR "buffer overflow\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
- printk(KERN_ERR "failed to flash the image\n");
- break;
- case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
- printk(KERN_ERR
- "generic error. Unknown error type\n");
- break;
- default:
- printk(KERN_ERR "Unknown error of type %d\n",
- sec_code);
- break;
- }
- status = VXGE_HW_FAIL;
- goto out;
- default:
- printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
- status = VXGE_HW_FAIL;
- goto out;
- }
- /* point to next 16 bytes */
- fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
- }
-out:
- return status;
-}
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *img)
-{
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status;
- int i;
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_EPROM_REV,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- break;
-
- img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
- img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
- img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
- img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_channel_free - Free memory allocated for channel
- * This function deallocates memory from the channel and various arrays
- * in the channel
- */
-static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
-{
- kfree(channel->work_arr);
- kfree(channel->free_arr);
- kfree(channel->reserve_arr);
- kfree(channel->orig_arr);
- kfree(channel);
-}
-
-/*
- * __vxge_hw_channel_initialize - Initialize a channel
- * This function initializes a channel by properly setting the
- * various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
-{
- u32 i;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = channel->vph->vpath;
-
- if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
- for (i = 0; i < channel->length; i++)
- channel->orig_arr[i] = channel->reserve_arr[i];
- }
-
- switch (channel->type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- vpath->fifoh = (struct __vxge_hw_fifo *)channel;
- channel->stats = &((struct __vxge_hw_fifo *)
- channel)->stats->common_stats;
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- vpath->ringh = (struct __vxge_hw_ring *)channel;
- channel->stats = &((struct __vxge_hw_ring *)
- channel)->stats->common_stats;
- break;
- default:
- break;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_channel_reset - Resets a channel
- * This function resets a channel by properly setting the various references
- */
-static enum vxge_hw_status
-__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
-{
- u32 i;
-
- for (i = 0; i < channel->length; i++) {
- if (channel->reserve_arr != NULL)
- channel->reserve_arr[i] = channel->orig_arr[i];
- if (channel->free_arr != NULL)
- channel->free_arr[i] = NULL;
- if (channel->work_arr != NULL)
- channel->work_arr[i] = NULL;
- }
- channel->free_ptr = channel->length;
- channel->reserve_ptr = channel->length;
- channel->reserve_top = 0;
- channel->post_index = 0;
- channel->compl_index = 0;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_pci_e_init
- * Initialize certain PCI/PCI-X configuration registers
- * with recommended values. Save config space for future hw resets.
- */
-static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
-{
- u16 cmd = 0;
-
- /* Set the PErr Repconse bit and SERR in PCI command register. */
- pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
- cmd |= 0x140;
- pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
-
- pci_save_state(hldev->pdev);
-}
-
-/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
- * in progress
- * This routine checks the vpath reset in progress register is turned zero
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
-{
- enum vxge_hw_status status;
- status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
- return status;
-}
-
-/*
- * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
- * Set the swapper bits appropriately for the lagacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- wmb();
-
- switch (val64) {
- case VXGE_HW_SWAPPER_INITIAL_VALUE:
- return status;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- break;
-
- case VXGE_HW_SWAPPER_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
- }
-
- wmb();
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
- status = VXGE_HW_ERR_SWAPPER_CTRL;
-
- return status;
-}
-
-/*
- * __vxge_hw_device_toc_get
- * This routine sets the swapper and reads the toc pointer and returns the
- * memory mapped address of the toc
- */
-static struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0)
-{
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc = NULL;
- enum vxge_hw_status status;
-
- struct vxge_hw_legacy_reg __iomem *legacy_reg =
- (struct vxge_hw_legacy_reg __iomem *)bar0;
-
- status = __vxge_hw_legacy_swapper_set(legacy_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&legacy_reg->toc_first_pointer);
- toc = bar0 + val64;
-exit:
- return toc;
-}
-
-/*
- * __vxge_hw_device_reg_addr_get
- * This routine sets the swapper and reads the toc pointer and initializes the
- * register location pointers in the device object. It waits until the ric is
- * completed initializing registers.
- */
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- hldev->legacy_reg = hldev->bar0;
-
- hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
- if (hldev->toc_reg == NULL) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- val64 = readq(&hldev->toc_reg->toc_common_pointer);
- hldev->common_reg = hldev->bar0 + val64;
-
- val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
- hldev->mrpcim_reg = hldev->bar0 + val64;
-
- for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
- hldev->srpcim_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
- hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
- }
-
- for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
- val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
- hldev->vpath_reg[i] = hldev->bar0 + val64;
- }
-
- val64 = readq(&hldev->toc_reg->toc_kdfc);
-
- switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
- case 0:
- hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64) ;
- break;
- default:
- break;
- }
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
- * This routine returns the Access Rights of the driver
- */
-static u32
-__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
-{
- u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
-
- switch (host_type) {
- case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- if (func_id == 0) {
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- }
- break;
- case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
- case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
- case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
- break;
- case VXGE_HW_SR_VH_FUNCTION0:
- case VXGE_HW_VH_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
- break;
- }
-
- return access_rights;
-}
-/*
- * __vxge_hw_device_is_privilaged
- * This routine checks if the device function is privilaged or not
- */
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
-{
- if (__vxge_hw_device_access_rights_get(host_type,
- func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
- return VXGE_HW_OK;
- else
- return VXGE_HW_ERR_PRIVILEGED_OPERATION;
-}
-
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_device_host_info_get
- * This routine returns the host type assignments
- */
-static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
- u32 i;
-
- val64 = readq(&hldev->common_reg->host_type_assignments);
-
- hldev->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- hldev->func_id =
- __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
-
- hldev->access_rights = __vxge_hw_device_access_rights_get(
- hldev->host_type, hldev->func_id);
-
- hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
- hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
-
- hldev->first_vp_id = i;
- break;
- }
-}
-
-/*
- * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
- * link width and signalling rate.
- */
-static enum vxge_hw_status
-__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- /* Get the negotiated link width and speed from PCI config space */
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
-
- if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
- return VXGE_HW_ERR_INVALID_PCI_INFO;
-
- switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
- case PCIE_LNK_WIDTH_RESRV:
- case PCIE_LNK_X1:
- case PCIE_LNK_X2:
- case PCIE_LNK_X4:
- case PCIE_LNK_X8:
- break;
- default:
- return VXGE_HW_ERR_INVALID_PCI_INFO;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_initialize
- * Initialize Titan-V hardware.
- */
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id)) {
- /* Validate the pci-e link width and speed */
- status = __vxge_hw_verify_pci_e_info(hldev);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- u64 data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- fw_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
- fw_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
- fw_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
- fw_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
- fw_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
- flash_date->month =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
- flash_date->year =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
- flash_version->minor =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
- flash_version->build =
- (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- __be64 *serial_number = (void *)hw_info->serial_number;
- __be64 *product_desc = (void *)hw_info->product_desc;
- __be64 *part_number = (void *)hw_info->part_number;
- enum vxge_hw_status status;
- u64 data0, data1 = 0, steer_ctrl = 0;
- u32 i, j = 0;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- serial_number[0] = cpu_to_be64(data0);
- serial_number[1] = cpu_to_be64(data1);
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- part_number[0] = cpu_to_be64(data0);
- part_number[1] = cpu_to_be64(data1);
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
- data0 = i;
- data1 = steer_ctrl = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- product_desc[j++] = cpu_to_be64(data0);
- product_desc[j++] = cpu_to_be64(data1);
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- data0 = 0;
-
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_FW_API_GET_FUNC_MODE,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- return status;
-
- hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
- u8 *macaddr, u8 *macaddr_mask)
-{
- u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- data0 = 0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
- int i;
-
- do {
- status = vxge_hw_vpath_fw_api(vpath, action,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data0, &data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data1);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i - 1] = (u8) (data0 & 0xFF);
- data0 >>= 8;
-
- macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
- data1 >>= 8;
- }
-
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
- data0 = 0, data1 = 0, steer_ctrl = 0;
-
- } while (!is_valid_ether_addr(macaddr));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_device_hw_info_get - Get the hw information
- * @bar0: the bar
- * @hw_info: the hw_info struct
- *
- * Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver, FW version information, and the first mac address for
- * each vpath
- */
-enum vxge_hw_status
-vxge_hw_device_hw_info_get(void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i;
- u64 val64;
- struct vxge_hw_toc_reg __iomem *toc;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status;
- struct __vxge_hw_virtualpath vpath;
-
- memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
-
- toc = __vxge_hw_device_toc_get(bar0);
- if (toc == NULL) {
- status = VXGE_HW_ERR_CRITICAL;
- goto exit;
- }
-
- val64 = readq(&toc->toc_common_pointer);
- common_reg = bar0 + val64;
-
- status = __vxge_hw_device_vpath_reset_in_prog_check(
- (u64 __iomem *)&common_reg->vpath_rst_in_prog);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
-
- val64 = readq(&common_reg->host_type_assignments);
-
- hw_info->host_type =
- (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpmgmt_pointer[i]);
-
- vpmgmt_reg = bar0 + val64;
-
- hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
- if (__vxge_hw_device_access_rights_get(hw_info->host_type,
- hw_info->func_id) &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
-
- val64 = readq(&toc->toc_mrpcim_pointer);
-
- mrpcim_reg = bar0 + val64;
-
- writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
- wmb();
- }
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
-
- spin_lock_init(&vpath.lock);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
- if (status != VXGE_HW_OK)
- goto exit;
-
- break;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
- continue;
-
- val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath.vp_reg = bar0 + val64;
- vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
-
- status = __vxge_hw_vpath_addr_get(&vpath,
- hw_info->mac_addrs[i],
- hw_info->mac_addr_masks[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
- struct __vxge_hw_device *hldev;
- struct list_head *p, *n;
-
- if (!blockpool)
- return;
-
- hldev = blockpool->hldev;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
- dma_unmap_single(&hldev->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- blockpool->pool_size--;
- }
-
- list_for_each_safe(p, n, &blockpool->free_entry_list) {
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- }
-
- return;
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max)
-{
- u32 i;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (blockpool == NULL) {
- status = VXGE_HW_FAIL;
- goto blockpool_create_exit;
- }
-
- blockpool->hldev = hldev;
- blockpool->block_size = VXGE_HW_BLOCK_SIZE;
- blockpool->pool_size = 0;
- blockpool->pool_max = pool_max;
- blockpool->req_out = 0;
-
- INIT_LIST_HEAD(&blockpool->free_block_list);
- INIT_LIST_HEAD(&blockpool->free_entry_list);
-
- for (i = 0; i < pool_size + pool_max; i++) {
- entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- list_add(&entry->item, &blockpool->free_entry_list);
- }
-
- for (i = 0; i < pool_size; i++) {
- memblock = vxge_os_dma_malloc(
- hldev->pdev,
- VXGE_HW_BLOCK_SIZE,
- &dma_handle,
- &acc_handle);
- if (memblock == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
- VXGE_HW_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
- vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry =
- kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry != NULL) {
- list_del(&entry->item);
- entry->length = VXGE_HW_BLOCK_SIZE;
- entry->memblock = memblock;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- } else {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- }
-
-blockpool_create_exit:
- return status;
-}
-
-/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
-{
- if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
- (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
-{
- enum vxge_hw_status status;
-
- if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
- (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
- return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
-
- status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
- if (status != VXGE_HW_OK)
- return status;
-
- if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
- ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
- (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
- return VXGE_HW_BADCFG_VPATH_MTU;
-
- if ((vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
- return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
- u32 i;
- enum vxge_hw_status status;
-
- if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
- return VXGE_HW_BADCFG_INTR_MODE;
-
- if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
- (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
- return VXGE_HW_BADCFG_RTS_MAC_EN;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- status = __vxge_hw_device_vpath_config_check(
- &new_config->vp_config[i]);
- if (status != VXGE_HW_OK)
- return status;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_device_initialize - Initialize Titan device.
- * Initialize Titan device. Note that all the arguments of this public API
- * are 'IN', including @hldev. Driver cooperates with
- * OS to find new Titan device, locate its PCI and memory spaces.
- *
- * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
- * to enable the latter to perform Titan hardware initialization.
- */
-enum vxge_hw_status
-vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config)
-{
- u32 i;
- u32 nblocks = 0;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_config_check(device_config);
- if (status != VXGE_HW_OK)
- goto exit;
-
- hldev = vzalloc(sizeof(struct __vxge_hw_device));
- if (hldev == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- hldev->magic = VXGE_HW_DEVICE_MAGIC;
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
-
- /* apply config */
- memcpy(&hldev->config, device_config,
- sizeof(struct vxge_hw_device_config));
-
- hldev->bar0 = attr->bar0;
- hldev->pdev = attr->pdev;
-
- hldev->uld_callbacks = attr->uld_callbacks;
-
- __vxge_hw_device_pci_e_init(hldev);
-
- status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK) {
- vfree(hldev);
- goto exit;
- }
-
- __vxge_hw_device_host_info_get(hldev);
-
- /* Incrementing for stats blocks */
- nblocks++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpath_assignments & vxge_mBIT(i)))
- continue;
-
- if (device_config->vp_config[i].ring.enable ==
- VXGE_HW_RING_ENABLE)
- nblocks += device_config->vp_config[i].ring.ring_blocks;
-
- if (device_config->vp_config[i].fifo.enable ==
- VXGE_HW_FIFO_ENABLE)
- nblocks += device_config->vp_config[i].fifo.fifo_blocks;
- nblocks++;
- }
-
- if (__vxge_hw_blockpool_create(hldev,
- &hldev->block_pool,
- device_config->dma_blockpool_initial + nblocks,
- device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
-
- vxge_hw_device_terminate(hldev);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_device_initialize(hldev);
- if (status != VXGE_HW_OK) {
- vxge_hw_device_terminate(hldev);
- goto exit;
- }
-
- *devh = hldev;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_terminate - Terminate Titan device.
- * Terminate HW device.
- */
-void
-vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
-{
- vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
-
- hldev->magic = VXGE_HW_DEVICE_DEAD;
- __vxge_hw_blockpool_destroy(&hldev->block_pool);
- vfree(hldev);
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- * and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_stats_access_exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->xmac_stats_access_cmd,
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
- vpath->hldev->config.device_poll_millis);
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&vp_reg->xmac_stats_access_data);
- else
- *stat = 0;
-
-vpath_stats_access_exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)vpath_tx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset, val64);
- if (status != VXGE_HW_OK)
- goto exit;
- offset++;
- val64++;
- }
-exit:
- return status;
-}
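
Both this helper and the RX variant below walk the statistics structure as a flat array of 64-bit counters, issuing one access per word until sizeof(struct)/8 words have been read. A hedged userspace sketch of the same pattern, with the vxge types replaced by a stand-in struct and a fake register read:

#include <stdint.h>
#include <stdio.h>

struct demo_stats { uint64_t frames, bytes, errors; };

/* Stand-in for the per-word register access; here it just fabricates data. */
static int read_stat_word(uint32_t offset, uint64_t *out)
{
	*out = 0x1000 + offset;
	return 0;
}

static int read_stats(struct demo_stats *stats)
{
	uint64_t *val = (uint64_t *)stats;
	uint32_t offset = 0;
	size_t i;

	for (i = 0; i < sizeof(*stats) / 8; i++) {
		if (read_stat_word(offset, val))
			return -1;
		offset++;
		val++;
	}
	return 0;
}

int main(void)
{
	struct demo_stats s;

	if (!read_stats(&s))
		printf("frames=%llu\n", (unsigned long long)s.frames);
	return 0;
}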
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
- val64 = (u64 *) vpath_rx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset >> 3, val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- val64 = readq(&vp_reg->vpath_debug_stats0);
- hw_stats->ini_num_mwr_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats1);
- hw_stats->ini_num_mrd_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats2);
- hw_stats->ini_num_cpl_rcvd =
- (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats3);
- hw_stats->ini_num_mwr_byte_sent =
- VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats4);
- hw_stats->ini_num_cpl_byte_rcvd =
- VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats5);
- hw_stats->wrcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats6);
- hw_stats->rdcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count0 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count1 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count2 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count3 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count4);
- hw_stats->vpath_genstats_count4 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count5);
- hw_stats->vpath_genstats_count5 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
- val64);
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
- hw_stats->prog_event_vnum0 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
- hw_stats->prog_event_vnum1 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
- hw_stats->prog_event_vnum2 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
- hw_stats->prog_event_vnum3 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
- val64 = readq(&vp_reg->rx_multi_cast_stats);
- hw_stats->rx_multi_cast_frame_discard =
- (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
- val64 = readq(&vp_reg->rx_frm_transferred);
- hw_stats->rx_frm_transferred =
- (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
- val64 = readq(&vp_reg->rxd_returned);
- hw_stats->rxd_returned =
- (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_mpa);
- hw_stats->rx_mpa_len_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
- hw_stats->rx_mpa_mrk_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
- hw_stats->rx_mpa_crc_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_fau);
- hw_stats->rx_permitted_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
- hw_stats->rx_vp_reset_discarded_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
- hw_stats->rx_wol_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
- val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
- hw_stats->tx_vp_reset_discarded_frms =
- (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
- val64);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_stats_get - Get the device hw statistics.
- * Returns the vpath h/w stats for the device.
- */
-enum vxge_hw_status
-vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_hw_info *hw_stats)
-{
- u32 i;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
- (hldev->virtual_paths[i].vp_open ==
- VXGE_HW_VP_NOT_OPEN))
- continue;
-
- memcpy(hldev->virtual_paths[i].hw_stats_sav,
- hldev->virtual_paths[i].hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(
- &hldev->virtual_paths[i],
- hldev->virtual_paths[i].hw_stats);
- }
-
- memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_hw_info));
-
- return status;
-}
-
-/*
- * vxge_hw_driver_stats_get - Get the device sw statistics.
- * Returns the vpath s/w stats for the device.
- */
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *hldev,
- struct vxge_hw_device_stats_sw_info *sw_stats)
-{
- memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
- sizeof(struct vxge_hw_device_stats_sw_info));
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
- * and offset and perform the requested operation
- * Performs the requested operation on the statistics at that location.
- */
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
- u32 operation, u32 location, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
- VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &hldev->mrpcim_reg->xmac_stats_sys_cmd,
- VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
- hldev->config.device_poll_millis);
-
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
- else
- *stat = 0;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
- * Get the Statistics on aggregate port
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *)aggr_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (104 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
- * Get the Statistics on port
- */
-static enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = 0x0;
- val64 = (u64 *) port_stats;
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
- status = vxge_hw_mrpcim_stats_access(hldev,
- VXGE_HW_STATS_OP_READ,
- VXGE_HW_STATS_LOC_AGGR,
- ((offset + (608 * port)) >> 3), val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
- * Get the XMAC Statistics
- */
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
- struct vxge_hw_xmac_stats *xmac_stats)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 i;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 0, &xmac_stats->aggr_stats[0]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = vxge_hw_device_xmac_aggr_stats_get(hldev,
- 1, &xmac_stats->aggr_stats[1]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- status = vxge_hw_device_xmac_port_stats_get(hldev,
- i, &xmac_stats->port_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_tx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(
- &hldev->virtual_paths[i],
- &xmac_stats->vpath_rx_stats[i]);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_debug_set - Set the debug module mask and level
- * This routine is used to dynamically change the debug output
- */
-void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
- enum vxge_debug_level level, u32 mask)
-{
- if (hldev == NULL)
- return;
-
-#if defined(VXGE_DEBUG_TRACE_MASK) || \
- defined(VXGE_DEBUG_ERR_MASK)
- hldev->debug_module_mask = mask;
- hldev->debug_level = level;
-#endif
-
-#if defined(VXGE_DEBUG_ERR_MASK)
- hldev->level_err = level & VXGE_ERR;
-#endif
-
-#if defined(VXGE_DEBUG_TRACE_MASK)
- hldev->level_trace = level & VXGE_TRACE;
-#endif
-}
-
-/*
- * vxge_hw_device_error_level_get - Get the error level
- * This routine returns the current error level set
- */
-u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return VXGE_ERR;
- else
- return hldev->level_err;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_trace_level_get - Get the trace level
- * This routine returns the current trace level set
- */
-u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK)
- if (hldev == NULL)
- return VXGE_TRACE;
- else
- return hldev->level_trace;
-#else
- return 0;
-#endif
-}
-
-/*
- * vxge_hw_device_getpause_data - Pause frame generation and reception.
- * Returns the Pause frame generation and reception capability of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 *tx, u32 *rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- goto exit;
- }
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
- *tx = 1;
- if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
- *rx = 1;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_device_setpause_data - set/reset pause frame generation.
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- */
-enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
- u32 port, u32 tx, u32 rx)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
- status = VXGE_HW_ERR_INVALID_PORT;
- goto exit;
- }
-
- status = __vxge_hw_device_is_privilaged(hldev->host_type,
- hldev->func_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
- if (tx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
- if (rx)
- val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
- else
- val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
-
- writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
-exit:
- return status;
-}
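
The setter above is a straightforward read-modify-write of the two per-port enable bits. The same pattern, reduced to a standalone sketch with made-up bit positions (the real masks are the VXGE_HW_RXMAC_PAUSE_CFG_PORT_* definitions):

#include <stdint.h>
#include <stdio.h>

#define PAUSE_GEN_EN (1ULL << 0)	/* illustrative bit positions only */
#define PAUSE_RCV_EN (1ULL << 1)

/* Set or clear the generation/reception enable bits in a register image. */
static uint64_t set_pause(uint64_t reg, int tx, int rx)
{
	if (tx)
		reg |= PAUSE_GEN_EN;
	else
		reg &= ~PAUSE_GEN_EN;
	if (rx)
		reg |= PAUSE_RCV_EN;
	else
		reg &= ~PAUSE_RCV_EN;
	return reg;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)set_pause(0, 1, 0));
	return 0;
}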
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
-{
- struct pci_dev *dev = hldev->pdev;
- u16 lnk;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
- return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
-}
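
vxge_hw_device_link_width_get extracts the negotiated link width from the PCIe Link Status register; the width field sits in bits 9:4, which is what the mask-and-shift expresses. A standalone sketch of that extraction (0x03f0 matches the kernel's PCI_EXP_LNKSTA_NLW mask):

#include <stdint.h>
#include <stdio.h>

/* Negotiated Link Width lives in bits 9:4 of the PCIe Link Status register. */
static uint16_t link_width(uint16_t lnksta)
{
	return (lnksta & 0x03f0) >> 4;
}

int main(void)
{
	/* e.g. speed field 1 (2.5 GT/s) with a link that trained at x8 */
	printf("width = x%u\n", link_width(0x0081));
	return 0;
}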
-
-/*
- * __vxge_hw_ring_block_memblock_idx - Return the memblock index
- * This function returns the index of the memory block
- */
-static inline u32
-__vxge_hw_ring_block_memblock_idx(u8 *block)
-{
- return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
-}
-
-/*
- * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
- * This function stores the memblock index in a memory block
- */
-static inline void
-__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
-{
- *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
-}
-
-/*
- * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
- * in RxD block
- * Sets the next block pointer in RxD block
- */
-static inline void
-__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
-{
- *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
-}
-
-/*
- * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
- * first block
- * Returns the dma address of the first RxD block
- */
-static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
-{
- struct vxge_hw_mempool_dma *dma_object;
-
- dma_object = ring->mempool->memblocks_dma_arr;
- vxge_assert(dma_object != NULL);
-
- return dma_object->addr;
-}
-
-/*
- * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
- * This function returns the dma address of a given item
- */
-static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
- void *item)
-{
- u32 memblock_idx;
- void *memblock;
- struct vxge_hw_mempool_dma *memblock_dma_object;
- ptrdiff_t dma_item_offset;
-
- /* get owner memblock index */
- memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
-
- /* get owner memblock by memblock index */
- memblock = mempoolh->memblocks_arr[memblock_idx];
-
- /* get memblock DMA object by memblock index */
- memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
-
- /* calculate offset in the memblock of this item */
- dma_item_offset = (u8 *)item - (u8 *)memblock;
-
- return memblock_dma_object->addr + dma_item_offset;
-}
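
The item's bus address is simply the owning memblock's DMA base plus the item's byte offset within that memblock. The same arithmetic as a standalone sketch, with a stand-in descriptor in place of struct vxge_hw_mempool_dma:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct memblock_dma { uint64_t addr; };

/* DMA address of an item = DMA base of its memblock + offset inside it. */
static uint64_t item_dma_addr(void *memblock, struct memblock_dma *dma, void *item)
{
	ptrdiff_t off = (uint8_t *)item - (uint8_t *)memblock;

	return dma->addr + off;
}

int main(void)
{
	uint8_t block[4096];
	struct memblock_dma dma = { .addr = 0x10000000ULL };

	printf("0x%llx\n",
	       (unsigned long long)item_dma_addr(block, &dma, block + 256));
	return 0;
}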
-
-/*
- * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
- * This function links one RxD block to the next
- */
-static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
- struct __vxge_hw_ring *ring, u32 from,
- u32 to)
-{
- u8 *to_item , *from_item;
- dma_addr_t to_dma;
-
- /* get "from" RxD block */
- from_item = mempoolh->items_arr[from];
- vxge_assert(from_item);
-
- /* get "to" RxD block */
- to_item = mempoolh->items_arr[to];
- vxge_assert(to_item);
-
- /* return address of the beginning of previous RxD block */
- to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
-
- /* set next pointer for this RxD block to point on
- * previous item's DMA start address */
- __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
-}
-
-/*
- * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
- * block callback
- * This function is the callback passed to __vxge_hw_mempool_create to create
- * the memory pool for RxD blocks
- */
-static void
-__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 i;
- void *item = mempoolh->items_arr[index];
- struct __vxge_hw_ring *ring =
- (struct __vxge_hw_ring *)mempoolh->userdata;
-
- /* format rxds array */
- for (i = 0; i < ring->rxds_per_block; i++) {
- void *rxdblock_priv;
- void *uld_priv;
- struct vxge_hw_ring_rxd_1 *rxdp;
-
- u32 reserve_index = ring->channel.reserve_ptr -
- (index * ring->rxds_per_block + i + 1);
- u32 memblock_item_idx;
-
- ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
- i * ring->rxd_size;
-
- /* Note: memblock_item_idx is index of the item within
- * the memblock. For instance, in case of three RxD-blocks
- * per memblock this value can be 0, 1 or 2. */
- rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
- memblock_index, item,
- &memblock_item_idx);
-
- rxdp = ring->channel.reserve_arr[reserve_index];
-
- uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
-
- /* pre-format Host_Control */
- rxdp->host_control = (u64)(size_t)uld_priv;
- }
-
- __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
-
- if (is_last) {
- /* link last one with first one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
- }
-
- if (index > 0) {
- /* link this RxD block with previous one */
- __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
- }
-}
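
The callback above fills the channel's reserve array from the top down: RxD j of block index lands in slot reserve_ptr - (index * rxds_per_block + j + 1), so the first RxD formatted ends up in the last slot. A small sketch of that index mapping with illustrative counts:

#include <stdio.h>

#define RXDS_PER_BLOCK 4
#define NBLOCKS 2

int main(void)
{
	int reserve_ptr = NBLOCKS * RXDS_PER_BLOCK;
	int block, j;

	/* Walk the blocks in allocation order and print which reserve slot
	 * each RxD occupies: the first RxD formatted ends up in the last slot. */
	for (block = 0; block < NBLOCKS; block++)
		for (j = 0; j < RXDS_PER_BLOCK; j++)
			printf("block %d rxd %d -> slot %d\n", block, j,
			       reserve_ptr - (block * RXDS_PER_BLOCK + j + 1));
	return 0;
}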
-
-/*
- * vxge_hw_ring_replenish - Initial replenish of RxDs
- * This function replenishes the RxDs from the reserve array to the work array
- */
-static enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
-{
- void *rxd;
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &ring->channel;
-
- while (vxge_hw_channel_dtr_count(channel) > 0) {
-
- status = vxge_hw_ring_rxd_reserve(ring, &rxd);
-
- vxge_assert(status == VXGE_HW_OK);
-
- if (ring->rxd_init) {
- status = ring->rxd_init(rxd, channel->userdata);
- if (status != VXGE_HW_OK) {
- vxge_hw_ring_rxd_free(ring, rxd);
- goto exit;
- }
- }
-
- vxge_hw_ring_rxd_post(ring, rxd);
- }
- status = VXGE_HW_OK;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates the required memory for the channel and the various
- * arrays in the channel
- */
-static struct __vxge_hw_channel *
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type,
- u32 length, u32 per_dtr_space,
- void *userdata)
-{
- struct __vxge_hw_channel *channel;
- struct __vxge_hw_device *hldev;
- int size = 0;
- u32 vp_id;
-
- hldev = vph->vpath->hldev;
- vp_id = vph->vpath->vp_id;
-
- switch (type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- size = sizeof(struct __vxge_hw_fifo);
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- size = sizeof(struct __vxge_hw_ring);
- break;
- default:
- break;
- }
-
- channel = kzalloc(size, GFP_KERNEL);
- if (channel == NULL)
- goto exit0;
- INIT_LIST_HEAD(&channel->item);
-
- channel->common_reg = hldev->common_reg;
- channel->first_vp_id = hldev->first_vp_id;
- channel->type = type;
- channel->devh = hldev;
- channel->vph = vph;
- channel->userdata = userdata;
- channel->per_dtr_space = per_dtr_space;
- channel->length = length;
- channel->vp_id = vp_id;
-
- channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->work_arr == NULL)
- goto exit1;
-
- channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->free_arr == NULL)
- goto exit1;
- channel->free_ptr = length;
-
- channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->reserve_arr == NULL)
- goto exit1;
- channel->reserve_ptr = length;
- channel->reserve_top = 0;
-
- channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
- if (channel->orig_arr == NULL)
- goto exit1;
-
- return channel;
-exit1:
- __vxge_hw_channel_free(channel);
-
-exit0:
- return NULL;
-}
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to the block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
-{
- struct __vxge_hw_blockpool *blockpool;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- dma_addr_t dma_addr;
-
- blockpool = &devh->block_pool;
-
- if (block_addr == NULL) {
- blockpool->req_out--;
- goto exit;
- }
-
- dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
- DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
- vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
- blockpool->req_out--;
- goto exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry) {
- entry->length = length;
- entry->memblock = block_addr;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_h;
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- blockpool->req_out--;
-
-exit:
- return;
-}
-
-static inline void
-vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
-{
- void *vaddr;
-
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
- u32 nreq = 0, i;
-
- if ((blockpool->pool_size + blockpool->req_out) <
- VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
- nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
- blockpool->req_out += nreq;
- }
-
- for (i = 0; i < nreq; i++)
- vxge_os_dma_malloc_async(
- (blockpool->hldev)->pdev,
- blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from the pool
- * Allocates a block of memory of the given size, either from the block pool
- * or by calling vxge_os_dma_malloc()
- */
-static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- void *memblock = NULL;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
-
- memblock = vxge_os_dma_malloc(devh->pdev, size,
- &dma_object->handle,
- &dma_object->acc_handle);
-
- if (!memblock)
- goto exit;
-
- dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
- size, DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
- vxge_os_dma_free(devh->pdev, memblock,
- &dma_object->acc_handle);
- memblock = NULL;
- goto exit;
- }
-
- } else {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- dma_object->addr = entry->dma_addr;
- dma_object->handle = entry->dma_handle;
- dma_object->acc_handle = entry->acc_handle;
- memblock = entry->memblock;
-
- list_add(&entry->item,
- &blockpool->free_entry_list);
- blockpool->pool_size--;
- }
-
- if (memblock != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
- }
-exit:
- return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static void
-__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- if (blockpool->pool_size < blockpool->pool_max)
- break;
-
- dma_unmap_single(&(blockpool->hldev)->pdev->dev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(
- (blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
- list_add(p, &blockpool->free_entry_list);
-
- blockpool->pool_size--;
-
- }
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allocated with
- * __vxge_hw_blockpool_malloc
- */
-static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
- void *memblock, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
- dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
- DMA_BIDIRECTIONAL);
- vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
- } else {
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = vmalloc(sizeof(
- struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = size;
- entry->memblock = memblock;
- entry->dma_addr = dma_object->addr;
- entry->acc_handle = dma_object->acc_handle;
- entry->dma_handle = dma_object->handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- if (status == VXGE_HW_OK)
- __vxge_hw_blockpool_blocks_remove(blockpool);
- }
-}
-
-/*
- * __vxge_hw_mempool_destroy
- */
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
-{
- u32 i, j;
- struct __vxge_hw_device *devh = mempool->devh;
-
- for (i = 0; i < mempool->memblocks_allocated; i++) {
- struct vxge_hw_mempool_dma *dma_object;
-
- vxge_assert(mempool->memblocks_arr[i]);
- vxge_assert(mempool->memblocks_dma_arr + i);
-
- dma_object = mempool->memblocks_dma_arr + i;
-
- for (j = 0; j < mempool->items_per_memblock; j++) {
- u32 index = i * mempool->items_per_memblock + j;
-
- /* to skip the last partially filled (if any) memblock */
- if (index >= mempool->items_current)
- break;
- }
-
- vfree(mempool->memblocks_priv_arr[i]);
-
- __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
- mempool->memblock_size, dma_object);
- }
-
- vfree(mempool->items_arr);
- vfree(mempool->memblocks_dma_arr);
- vfree(mempool->memblocks_priv_arr);
- vfree(mempool->memblocks_arr);
- vfree(mempool);
-}
-
-/*
- * __vxge_hw_mempool_grow
- * Grows the mempool by up to %num_allocate memblocks.
- */
-static enum vxge_hw_status
-__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
- u32 *num_allocated)
-{
- u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
- u32 n_items = mempool->items_per_memblock;
- u32 start_block_idx = mempool->memblocks_allocated;
- u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- *num_allocated = 0;
-
- if (end_block_idx > mempool->memblocks_max) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- for (i = start_block_idx; i < end_block_idx; i++) {
- u32 j;
- u32 is_last = ((end_block_idx - 1) == i);
- struct vxge_hw_mempool_dma *dma_object =
- mempool->memblocks_dma_arr + i;
- void *the_memblock;
-
- /* allocate memblock's private part. Each DMA memblock
- * has a space allocated for item's private usage upon
- * mempool's user request. Each time the mempool grows, it will
- * allocate a new memblock and its private part at once.
- * This helps to minimize memory usage a lot. */
- mempool->memblocks_priv_arr[i] =
- vzalloc(array_size(mempool->items_priv_size, n_items));
- if (mempool->memblocks_priv_arr[i] == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- /* allocate DMA-capable memblock */
- mempool->memblocks_arr[i] =
- __vxge_hw_blockpool_malloc(mempool->devh,
- mempool->memblock_size, dma_object);
- if (mempool->memblocks_arr[i] == NULL) {
- vfree(mempool->memblocks_priv_arr[i]);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- (*num_allocated)++;
- mempool->memblocks_allocated++;
-
- memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
-
- the_memblock = mempool->memblocks_arr[i];
-
- /* fill the items hash array */
- for (j = 0; j < n_items; j++) {
- u32 index = i * n_items + j;
-
- if (first_time && index >= mempool->items_initial)
- break;
-
- mempool->items_arr[index] =
- ((char *)the_memblock + j*mempool->item_size);
-
- /* let the caller do more work on each item */
- if (mempool->item_func_alloc != NULL)
- mempool->item_func_alloc(mempool, i,
- dma_object, index, is_last);
-
- mempool->items_current = index + 1;
- }
-
- if (first_time && mempool->items_current ==
- mempool->items_initial)
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_mempool_create
- * This function creates a memory pool object. The pool may grow but will
- * never shrink. It consists of a number of dynamically allocated blocks,
- * together large enough to hold %items_initial items. The memory is
- * DMA-able, but the client must map/unmap it before interoperating with
- * the device.
- */
-static struct vxge_hw_mempool *
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 items_priv_size,
- u32 items_initial,
- u32 items_max,
- const struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 memblocks_to_allocate;
- struct vxge_hw_mempool *mempool = NULL;
- u32 allocated;
-
- if (memblock_size < item_size) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- mempool = vzalloc(sizeof(struct vxge_hw_mempool));
- if (mempool == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- mempool->devh = devh;
- mempool->memblock_size = memblock_size;
- mempool->items_max = items_max;
- mempool->items_initial = items_initial;
- mempool->item_size = item_size;
- mempool->items_priv_size = items_priv_size;
- mempool->item_func_alloc = mp_callback->item_func_alloc;
- mempool->userdata = userdata;
-
- mempool->memblocks_allocated = 0;
-
- mempool->items_per_memblock = memblock_size / item_size;
-
- mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* allocate array of memblocks */
- mempool->memblocks_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of private parts of items per memblocks */
- mempool->memblocks_priv_arr =
- vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
- if (mempool->memblocks_priv_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr =
- vzalloc(array_size(sizeof(struct vxge_hw_mempool_dma),
- mempool->memblocks_max));
- if (mempool->memblocks_dma_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* allocate hash array of items */
- mempool->items_arr = vzalloc(array_size(sizeof(void *),
- mempool->items_max));
- if (mempool->items_arr == NULL) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
- /* calculate initial number of memblocks */
- memblocks_to_allocate = (mempool->items_initial +
- mempool->items_per_memblock - 1) /
- mempool->items_per_memblock;
-
- /* pre-allocate the mempool */
- status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
- &allocated);
- if (status != VXGE_HW_OK) {
- __vxge_hw_mempool_destroy(mempool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- mempool = NULL;
- goto exit;
- }
-
-exit:
- return mempool;
-}
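
__vxge_hw_mempool_create derives items_per_memblock = memblock_size / item_size, rounds items_max up to whole memblocks for memblocks_max, and pre-grows the pool by the number of memblocks needed for items_initial. A hedged sketch of that sizing arithmetic with example numbers in place of the driver's configuration values:

#include <stdio.h>

/* Ceiling division, as used for the memblock counts above. */
static unsigned int ceil_div(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	unsigned int memblock_size = 4096, item_size = 256;
	unsigned int items_initial = 30, items_max = 64;
	unsigned int per_block = memblock_size / item_size;

	printf("items per memblock : %u\n", per_block);
	printf("memblocks max      : %u\n", ceil_div(items_max, per_block));
	printf("initial memblocks  : %u\n", ceil_div(items_initial, per_block));
	return 0;
}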
-
-/*
- * __vxge_hw_ring_abort - Returns the RxDs
- * This function terminates the outstanding RxDs of the ring
- */
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
-{
- void *rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(channel, &rxdh);
-
- if (rxdh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(channel);
-
- if (ring->rxd_term)
- ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
- channel->userdata);
-
- vxge_hw_channel_dtr_free(channel, rxdh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during the vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- __vxge_hw_ring_abort(ring);
-
- status = __vxge_hw_channel_reset(channel);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_ring_delete - Removes the ring
- * This function frees up the memory pool and removes the ring
- */
-static enum vxge_hw_status
-__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_ring *ring = vp->vpath->ringh;
-
- __vxge_hw_ring_abort(ring);
-
- if (ring->mempool)
- __vxge_hw_mempool_destroy(ring->mempool);
-
- vp->vpath->ringh = NULL;
- __vxge_hw_channel_free(&ring->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates a Ring and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_ring_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_ring *ring;
- u32 ring_length;
- struct vxge_hw_ring_config *config;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
- static const struct vxge_hw_mempool_cbs ring_mp_callback = {
- .item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
- };
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- hldev = vp->vpath->hldev;
- vp_id = vp->vpath->vp_id;
-
- config = &hldev->config.vp_config[vp_id].ring;
-
- ring_length = config->ring_blocks *
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_RING,
- ring_length,
- attr->per_rxd_space,
- attr->userdata);
- if (ring == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vp->vpath->ringh = ring;
- ring->vp_id = vp_id;
- ring->vp_reg = vp->vpath->vp_reg;
- ring->common_reg = hldev->common_reg;
- ring->stats = &vp->vpath->sw_stats->ring_stats;
- ring->config = config;
- ring->callback = attr->callback;
- ring->rxd_init = attr->rxd_init;
- ring->rxd_term = attr->rxd_term;
- ring->buffer_mode = config->buffer_mode;
- ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
- ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
- ring->rxds_limit = config->rxds_limit;
-
- ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
- ring->rxd_priv_size =
- sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
- ring->per_rxd_space = attr->per_rxd_space;
-
- ring->rxd_priv_size =
- ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- /* how many RxDs can fit into one block. Depends on configured
- * buffer_mode. */
- ring->rxds_per_block =
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
- /* calculate actual RxD block private size */
- ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring->mempool = __vxge_hw_mempool_create(hldev,
- VXGE_HW_BLOCK_SIZE,
- VXGE_HW_BLOCK_SIZE,
- ring->rxdblock_priv_size,
- ring->config->ring_blocks,
- ring->config->ring_blocks,
- &ring_mp_callback,
- ring);
- if (ring->mempool == NULL) {
- __vxge_hw_ring_delete(vp);
- return VXGE_HW_ERR_OUT_OF_MEMORY;
- }
-
- status = __vxge_hw_channel_initialize(&ring->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
-
- /* Note:
- * Specifying rxd_init callback means two things:
- * 1) rxds need to be initialized by driver at channel-open time;
- * 2) rxds need to be posted at channel-open time
- * (that's what the initial_replenish() below does)
- * Currently we don't have a case when the 1) is done without the 2).
- */
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
- }
- }
-
- /* initial replenish will increment the counter in its post() routine,
- * we have to reset it */
- ring->stats->common_stats.usage_cnt = 0;
-exit:
- return status;
-}
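
Both the ring above and the fifo below round the per-descriptor private area up to a whole number of cache lines so adjacent descriptors never share a line. The rounding idiom as a standalone sketch (64 is an assumed cache-line size, not necessarily VXGE_CACHE_LINE_SIZE):

#include <stdio.h>

#define CACHE_LINE_SIZE 64

/* Round a size up to the next multiple of the cache line size. */
static unsigned int cacheline_align(unsigned int size)
{
	return ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE) * CACHE_LINE_SIZE;
}

int main(void)
{
	printf("%u -> %u\n", 136, cacheline_align(136));	/* 192 */
	printf("%u -> %u\n", 128, cacheline_align(128));	/* 128 */
	return 0;
}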
-
-/*
- * vxge_hw_device_config_default_get - Initialize device config with defaults.
- * Initialize Titan device config with default values.
- */
-enum vxge_hw_status
-vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
-{
- u32 i;
-
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
- device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
- device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
- device_config->rth_en = VXGE_HW_RTH_DEFAULT;
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
- device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
- device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
-
- device_config->vp_config[i].min_bandwidth =
- VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
-
- device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- VXGE_HW_MIN_FIFO_BLOCKS;
-
- device_config->vp_config[i].fifo.max_frags =
- VXGE_HW_MAX_FIFO_FRAGS;
-
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
-
- device_config->vp_config[i].fifo.alignment_size =
- VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
-
- device_config->vp_config[i].fifo.no_snoop_bits =
- VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].tti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].tti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_DEFAULT;
-
- device_config->vp_config[i].rti.btimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.rtimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.ltimer_val =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_a =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_b =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.urange_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_c =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].rti.uec_d =
- VXGE_HW_USE_FLASH_DEFAULT;
-
- device_config->vp_config[i].mtu =
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
- * Set the swapper bits appropriately for the vpath.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
-#ifndef __BIG_ENDIAN
- u64 val64;
-
- val64 = readq(&vpath_reg->vpath_general_cfg1);
- wmb();
- val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
- writeq(val64, &vpath_reg->vpath_general_cfg1);
- wmb();
-#endif
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
- * Set the swapper bits appropriately for the kdfc.
- */
-static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
-
- val64 = readq(&legacy_reg->pifm_wr_swap_en);
-
- if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
- val64 = readq(&vpath_reg->kdfcctl_cfg0);
- wmb();
-
- val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
- VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
-
- writeq(val64, &vpath_reg->kdfcctl_cfg0);
- wmb();
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_hw_mgmt_reg_read - Read Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 *value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->srpcim_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- *value = readq((void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
- */
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
-{
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- int i = 0, j = 0;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- vpmgmt_reg = hldev->vpmgmt_reg[i];
- for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
- if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
- & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
- return VXGE_HW_FAIL;
- }
- }
- return VXGE_HW_OK;
-}
-/*
- * vxge_hw_mgmt_reg_write - Write Titan register.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
- enum vxge_hw_mgmt_reg_type type,
- u32 index, u32 offset, u64 value)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- switch (type) {
- case vxge_hw_mgmt_reg_type_legacy:
- if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->legacy_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_toc:
- if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->toc_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_common:
- if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->common_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_mrpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
- break;
- case vxge_hw_mgmt_reg_type_srpcim:
- if (!(hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
- break;
- }
- if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
- offset);
-
- break;
- case vxge_hw_mgmt_reg_type_vpmgmt:
- if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
- offset);
- break;
- case vxge_hw_mgmt_reg_type_vpath:
- if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
- (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
- status = VXGE_HW_ERR_INVALID_INDEX;
- break;
- }
- if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
- status = VXGE_HW_ERR_INVALID_OFFSET;
- break;
- }
- writeq(value, (void __iomem *)hldev->vpath_reg[index] +
- offset);
- break;
- default:
- status = VXGE_HW_ERR_INVALID_TYPE;
- break;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_fifo_abort - Returns the TxDs
- * This function terminates the outstanding TxDs of the fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
- void *txdlh;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
- if (txdlh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(&fifo->channel);
-
- if (fifo->txdl_term) {
- fifo->txdl_term(txdlh,
- VXGE_HW_TXDL_STATE_POSTED,
- fifo->channel.userdata);
- }
-
- vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during the vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_fifo_abort(fifo);
- status = __vxge_hw_channel_reset(&fifo->channel);
-
- return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function frees up the memory pool and removes the FIFO
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
- __vxge_hw_fifo_abort(fifo);
-
- if (fifo->mempool)
- __vxge_hw_mempool_destroy(fifo->mempool);
-
- vp->vpath->fifoh = NULL;
-
- __vxge_hw_channel_free(&fifo->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
- * list callback
- * This function is the callback passed to __vxge_hw_mempool_create to create
- * the memory pool for TxD lists
- */
-static void
-__vxge_hw_fifo_mempool_item_alloc(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
- u32 index, u32 is_last)
-{
- u32 memblock_item_idx;
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
- struct __vxge_hw_fifo *fifo =
- (struct __vxge_hw_fifo *)mempoolh->userdata;
- void *memblock = mempoolh->memblocks_arr[memblock_index];
-
- vxge_assert(txdp);
-
- txdp->host_control = (u64) (size_t)
- __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
- &memblock_item_idx);
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- vxge_assert(txdl_priv);
-
- fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
-
- /* pre-format HW's TxDL's private */
- txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
- txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
- txdl_priv->dma_handle = dma_object->handle;
- txdl_priv->memblock = memblock;
- txdl_priv->first_txdp = txdp;
- txdl_priv->next_txdl_priv = NULL;
- txdl_priv->alloc_frags = 0;
-}
-
-/*
- * __vxge_hw_fifo_create - Create a FIFO
- * This function creates a FIFO and initializes it.
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_fifo_attr *attr)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_fifo *fifo;
- struct vxge_hw_fifo_config *config;
- u32 txdl_size, txdl_per_memblock;
- struct vxge_hw_mempool_cbs fifo_mp_callback;
- struct __vxge_hw_virtualpath *vpath;
-
- if ((vp == NULL) || (attr == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
- config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
-
- txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
-
- txdl_per_memblock = config->memblock_size / txdl_size;
-
- fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_FIFO,
- config->fifo_blocks * txdl_per_memblock,
- attr->per_txdl_space, attr->userdata);
-
- if (fifo == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- vpath->fifoh = fifo;
- fifo->nofl_db = vpath->nofl_db;
-
- fifo->vp_id = vpath->vp_id;
- fifo->vp_reg = vpath->vp_reg;
- fifo->stats = &vpath->sw_stats->fifo_stats;
-
- fifo->config = config;
-
- /* apply "interrupts per txdl" attribute */
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
- fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
- fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
-
- if (fifo->config->intr)
- fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
-
- fifo->no_snoop_bits = config->no_snoop_bits;
-
- /*
- * FIFO memory management strategy:
- *
- * TxDL split into three independent parts:
- * - set of TxD's
- * - TxD HW private part
- * - driver private part
- *
- * Adaptive memory allocation is used, i.e. memory is allocated on
- * demand with a size that fits into one memory block.
- * One memory block may contain more than one TxDL.
- *
- * During "reserve" operations more memory can be allocated on demand
- * for example due to FIFO full condition.
- *
- * The pool of memblocks never shrinks except in the __vxge_hw_fifo_close
- * routine, which essentially stops the channel and frees its resources.
- */
-
- /* TxDL common private size == TxDL private + driver private */
- fifo->priv_size =
- sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
- fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
- fifo->per_txdl_space = attr->per_txdl_space;
-
- /* recompute txdl size to be cacheline aligned */
- fifo->txdl_size = txdl_size;
- fifo->txdl_per_memblock = txdl_per_memblock;
-
- fifo->txdl_term = attr->txdl_term;
- fifo->callback = attr->callback;
-
- if (fifo->txdl_per_memblock == 0) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
- goto exit;
- }
-
- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
- fifo->mempool =
- __vxge_hw_mempool_create(vpath->hldev,
- fifo->config->memblock_size,
- fifo->txdl_size,
- fifo->priv_size,
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
- &fifo_mp_callback,
- fifo);
-
- if (fifo->mempool == NULL) {
- __vxge_hw_fifo_delete(vp);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- status = __vxge_hw_channel_initialize(&fifo->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_fifo_delete(vp);
- goto exit;
- }
-
- vxge_assert(fifo->channel.reserve_ptr);
-exit:
- return status;
-}
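
Per the memory-management comment above, a memblock holds as many whole TxDLs as fit: txdl_size = max_frags * sizeof(TxD) and txdl_per_memblock = memblock_size / txdl_size, with a zero quotient rejected as an invalid block size. A minimal sketch of that check, using illustrative sizes rather than the driver's configuration values:

#include <stdio.h>

int main(void)
{
	unsigned int max_frags = 64, txd_size = 32, memblock_size = 8192;
	unsigned int txdl_size = max_frags * txd_size;		/* 2048 */
	unsigned int txdl_per_memblock = memblock_size / txdl_size;

	if (txdl_per_memblock == 0)
		printf("invalid block size: one TxDL does not fit\n");
	else
		printf("%u TxDLs per memblock\n", txdl_per_memblock);
	return 0;
}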
-
-/*
- * __vxge_hw_vpath_pci_read - Read the content of the given address
- * in pci config space.
- * Read from the vpath pci config space.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0, u32 offset, u32 *val)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
-
- if (phy_func_0)
- val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
-
- writeq(val64, &vp_reg->pci_config_access_cfg1);
- wmb();
- writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
- &vp_reg->pci_config_access_cfg2);
- wmb();
-
- status = __vxge_hw_device_register_poll(
- &vp_reg->pci_config_access_cfg2,
- VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->pci_config_access_status);
-
- if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
- status = VXGE_HW_FAIL;
- *val = 0;
- } else
- *val = (u32)vxge_bVALn(val64, 32, 32);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_device_flick_link_led - Flick (blink) link LED.
- * @hldev: HW device.
- * @on_off: TRUE if flickering to be on, FALSE to be off
- *
- * Flicker the link LED.
- */
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
-{
- struct __vxge_hw_virtualpath *vpath;
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (hldev == NULL) {
- status = VXGE_HW_ERR_INVALID_DEVICE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[hldev->first_vp_id];
-
- data0 = on_off;
- status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
- 0, &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset,
- u64 *data0, u64 *data1)
-{
- enum vxge_hw_status status;
- u64 steer_ctrl = 0;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
- (rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
- }
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- data0, data1, &steer_ctrl);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
- (rts_table !=
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- *data1 = 0;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
- */
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
- u32 rts_table, u32 offset, u64 steer_data0,
- u64 steer_data1)
-{
- u64 data0, data1 = 0, steer_ctrl = 0;
- enum vxge_hw_status status;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- data0 = steer_data0;
-
- if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
- data1 = steer_data1;
-
- status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
- &data0, &data1, &steer_ctrl);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vp,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size)
-{
- u64 data0, data1;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, &data0, &data1);
- if (status != VXGE_HW_OK)
- goto exit;
-
- data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
-
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
-
- if (hash_type->hash_type_tcpipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
-
- if (hash_type->hash_type_ipv4_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
-
- if (hash_type->hash_type_tcpipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
-
- if (hash_type->hash_type_ipv6_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
-
- if (hash_type->hash_type_tcpipv6ex_en)
- data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
-
- if (hash_type->hash_type_ipv6ex_en)
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
-
- if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
- data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
- else
- data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
- 0, data0, 0);
-exit:
- return status;
-}
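A minimal usage sketch for the routine above (not taken from the driver): enable RTH on an already-open vpath, hashing TCP/IPv4 and plain IPv4 traffic. RTH_ALG_JENKINS is assumed to be one of the enum vxge_hw_rth_algoritms values declared in vxge-config.h, and the bucket_size of 8 is purely illustrative.

	static enum vxge_hw_status
	enable_basic_rth(struct __vxge_hw_vpath_handle *vp)
	{
		struct vxge_hw_rth_hash_types hash_types = {
			.hash_type_tcpipv4_en	= 1,
			.hash_type_ipv4_en	= 1,
		};

		/* Jenkins hash, 8 passed as the bucket-size argument */
		return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
						 &hash_types, 8);
	}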
-
-static void
-vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
- u16 flag, u8 *itable)
-{
- switch (flag) {
- case 1:
- *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 2:
- *data0 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 3:
- *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
- itable[j]);
- fallthrough;
- case 4:
- *data1 |=
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
- itable[j]);
- return;
- default:
- return;
- }
-}
-/*
- * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
- */
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size)
-{
- u32 i, j, action, rts_table;
- u64 data0;
- u64 data1;
- u32 max_entries;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- max_entries = (((u32)1) << itable_size);
-
- if (vp->vpath->hldev->config.rth_it_type
- == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
- action, rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- for (j = 0; j < max_entries; j++) {
-
- data1 = 0;
-
- data0 =
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
- VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
- itable[j]);
-
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[mtable[itable[j]]], action,
- rts_table, j, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- } else {
- action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
- rts_table =
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
- for (i = 0; i < vpath_count; i++) {
-
- for (j = 0; j < max_entries;) {
-
- data0 = 0;
- data1 = 0;
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 1, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 2, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 3, itable);
- j++;
- break;
- }
-
- while (j < max_entries) {
- if (mtable[itable[j]] != i) {
- j++;
- continue;
- }
- vxge_hw_rts_rth_data0_data1_get(j,
- &data0, &data1, 4, itable);
- j++;
- break;
- }
-
- if (data0 != 0) {
- status = __vxge_hw_vpath_rts_table_set(
- vpath_handles[i],
- action, rts_table,
- 0, data0, data1);
-
- if (status != VXGE_HW_OK)
- goto exit;
- }
- }
- }
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_check_leak - Check for memory leak
- * @ring: Handle to the ring object used for receive
- *
- * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
- * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
- * Returns: VXGE_HW_FAIL, if a leak has occurred.
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 rxd_new_count, rxd_spat;
-
- if (ring == NULL)
- return status;
-
- rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
- rxd_spat = readq(&ring->vp_reg->prc_cfg6);
- rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
-
- if (rxd_new_count >= rxd_spat)
- status = VXGE_HW_FAIL;
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mgmt_read
- * This routine reads the vpath_mgmt registers
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mgmt_read(
- struct __vxge_hw_device *hldev,
- struct __vxge_hw_virtualpath *vpath)
-{
- u32 i, mtu = 0, max_pyld = 0;
- u64 val64;
-
- for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
-
- val64 = readq(&vpath->vpmgmt_reg->
- rxmac_cfg0_port_vpmgmt_clone[i]);
- max_pyld =
- (u32)
- VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
- (val64);
- if (mtu < max_pyld)
- mtu = max_pyld;
- }
-
- vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (val64 & vxge_mBIT(i))
- vpath->vsport_number = i;
- }
-
- val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
-
- if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
- else
- VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
- * This routine checks the vpath_rst_in_prog register to see if the
- * adapter has completed the reset process for the vpath.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
-{
- enum vxge_hw_status status;
-
- status = __vxge_hw_device_register_poll(
- &vpath->hldev->common_reg->vpath_rst_in_prog,
- VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
- 1 << (16 - vpath->vp_id)),
- vpath->hldev->config.device_poll_millis);
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_reset
- * This routine resets the vpath on the device
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg0);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_sw_reset
- * This routine resets the vpath structures
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->ringh) {
- status = __vxge_hw_ring_reset(vpath->ringh);
- if (status != VXGE_HW_OK)
- goto exit;
- }
-
- if (vpath->fifoh)
- status = __vxge_hw_fifo_reset(vpath->fifoh);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_prc_configure
- * This routine configures the prc registers of virtual path using the config
- * passed
- */
-static void
-__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
- return;
-
- val64 = readq(&vp_reg->prc_cfg1);
- val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
- writeq(val64, &vp_reg->prc_cfg1);
-
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
- writeq(val64, &vpath->vp_reg->prc_cfg6);
-
- val64 = readq(&vp_reg->prc_cfg7);
-
- if (vpath->vp_config->ring.scatter_mode !=
- VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
-
- val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
-
- switch (vpath->vp_config->ring.scatter_mode) {
- case VXGE_HW_RING_SCATTER_MODE_A:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
- break;
- case VXGE_HW_RING_SCATTER_MODE_B:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
- break;
- case VXGE_HW_RING_SCATTER_MODE_C:
- val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
- VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
- break;
- }
- }
-
- writeq(val64, &vp_reg->prc_cfg7);
-
- writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
- __vxge_hw_ring_first_block_address_get(
- vpath->ringh) >> 3), &vp_reg->prc_cfg5);
-
- val64 = readq(&vp_reg->prc_cfg4);
- val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
- val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
-
- val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
- VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
-
- if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
- val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
- else
- val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
-
- writeq(val64, &vp_reg->prc_cfg4);
-}
-
-/*
- * __vxge_hw_vpath_kdfc_configure
- * This routine configures the kdfc registers of virtual path using the
- * config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u64 vpath_stride;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
-
- vpath->max_kdfc_db =
- (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
- val64+1)/2;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- vpath->max_nofl_db = vpath->max_kdfc_db;
-
- if (vpath->max_nofl_db <
- ((vpath->vp_config->fifo.memblock_size /
- (vpath->vp_config->fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd))) *
- vpath->vp_config->fifo.fifo_blocks)) {
-
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
- }
- val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
- (vpath->max_nofl_db*2)-1);
- }
-
- writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
-
- writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
- &vp_reg->kdfc_fifo_trpl_ctrl);
-
- val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
-
- val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
-
- val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
-#ifndef __BIG_ENDIAN
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
-#endif
- VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
-
- writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
- writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
- wmb();
- vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
-
- vpath->nofl_db =
- (struct __vxge_hw_non_offload_db_wrapper __iomem *)
- (hldev->kdfc + (vp_id *
- VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
- vpath_stride)));
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_mac_configure
- * This routine configures the mac of virtual path using the config passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- vp_config = vpath->vp_config;
-
- writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
- vpath->vsport_number), &vp_reg->xmac_vsport_choice);
-
- if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->xmac_rpa_vcfg);
-
- if (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
- if (vp_config->rpa_strip_vlan_tag)
- val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- else
- val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
- }
-
- writeq(val64, &vp_reg->xmac_rpa_vcfg);
- val64 = readq(&vp_reg->rxmac_vcfg0);
-
- if (vp_config->mtu !=
- VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- if ((vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vp_config->mtu +
- VXGE_HW_MAC_HEADER_MAX_SIZE);
- else
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
- vpath->max_mtu);
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg0);
-
- val64 = readq(&vp_reg->rxmac_vcfg1);
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
-
- if (hldev->config.rth_it_type ==
- VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
- val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
- 0x2) |
- VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
- }
-
- writeq(val64, &vp_reg->rxmac_vcfg1);
- }
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_tim_configure
- * This routine configures the tim registers of virtual path using the config
- * passed
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- writeq(0, &vp_reg->tim_dest_addr);
- writeq(0, &vp_reg->tim_vpath_map);
- writeq(0, &vp_reg->tim_bitmap);
- writeq(0, &vp_reg->tim_remap);
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE)
- writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
- (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
-
- val64 = readq(&vp_reg->tim_pci_cfg);
- val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
- writeq(val64, &vp_reg->tim_pci_cfg);
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->tti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->tti.urange_a);
- }
-
- if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->tti.urange_b);
- }
-
- if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->tti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->tti.uec_a);
- }
-
- if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->tti.uec_b);
- }
-
- if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->tti.uec_c);
- }
-
- if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->tti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->tti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->tti.rtimer_val);
- }
-
- if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->tti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- vpath->tim_tti_cfg3_saved = val64;
- }
-
- if (config->ring.enable == VXGE_HW_RING_ENABLE) {
-
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
- config->rti.btimer_val);
- }
-
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
- if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ac_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
- }
-
- if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ci_en)
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- else
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- }
-
- if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
- config->rti.urange_a);
- }
-
- if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
- config->rti.urange_b);
- }
-
- if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
- config->rti.urange_c);
- }
-
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg1_saved = val64;
-
- val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
- config->rti.uec_a);
- }
-
- if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
- config->rti.uec_b);
- }
-
- if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
- config->rti.uec_c);
- }
-
- if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
- val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
- config->rti.uec_d);
- }
-
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
- val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
-
- if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
- if (config->rti.timer_ri_en)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- else
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
- }
-
- if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
- config->rti.rtimer_val);
- }
-
- if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
- }
-
- if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- 0x3ffffff);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
- config->rti.ltimer_val);
- }
-
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- vpath->tim_rti_cfg3_saved = val64;
- }
-
- val64 = 0;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
- writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-
- val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
- val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
- writeq(val64, &vp_reg->tim_wrkld_clc);
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_initialize
- * This routine is the final phase of init which initializes the
- * registers of the vpath using the configuration passed.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- u64 val64;
- u32 val32;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
-
- /* Get MRRS value from device control */
- status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
- if (status == VXGE_HW_OK) {
- val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
- val64 &=
- ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
- }
-
- val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
- val64 |=
- VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
- VXGE_HW_MAX_PAYLOAD_SIZE_512);
-
- val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
- writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
-
-exit:
- return status;
-}
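For reference, the Max_Read_Request_Size field extracted above lives in bits 14:12 of the PCIe Device Control register, and the PCIe spec maps the encoding to 128 << value bytes. A small hedged helper (assuming VXGE_HW_PCI_EXP_DEVCTL_READRQ equals the standard 0x7000 mask):

	/* Decode a PCIe Device Control value into the MRRS in bytes,
	 * e.g. an encoding of 2 yields 512 bytes.
	 */
	static unsigned int pcie_mrrs_bytes(unsigned int devctl)
	{
		unsigned int encoding = (devctl & 0x7000) >> 12;

		return 128U << encoding;
	}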
-
-/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and frees up memory
- */
-static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
- goto exit;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
- vpath->hldev->tim_int_mask1, vpath->vp_id);
- hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
- /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
- * work after the interface is brought down.
- */
- spin_lock(&vpath->lock);
- vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
- spin_unlock(&vpath->lock);
-
- vpath->vpmgmt_reg = NULL;
- vpath->nofl_db = NULL;
- vpath->max_mtu = 0;
- vpath->vsport_number = 0;
- vpath->max_kdfc_db = 0;
- vpath->max_nofl_db = 0;
- vpath->ringh = NULL;
- vpath->fifoh = NULL;
- memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
- vpath->stats_block = NULL;
- vpath->hw_stats = NULL;
- vpath->hw_stats_sav = NULL;
- vpath->sw_stats = NULL;
-
-exit:
- return;
-}
-
-/*
- * __vxge_hw_vp_initialize - Initialize Virtual Path structure
- * This routine is the initial phase of init which resets the vpath and
- * initializes the software support structures.
- */
-static enum vxge_hw_status
-__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
- struct vxge_hw_vp_config *config)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
- status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
- goto exit;
- }
-
- vpath = &hldev->virtual_paths[vp_id];
-
- spin_lock_init(&vpath->lock);
- vpath->vp_id = vp_id;
- vpath->vp_open = VXGE_HW_VP_OPEN;
- vpath->hldev = hldev;
- vpath->vp_config = config;
- vpath->vp_reg = hldev->vpath_reg[vp_id];
- vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
-
- __vxge_hw_vpath_reset(hldev, vp_id);
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
- if (status != VXGE_HW_OK) {
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
- goto exit;
- }
-
- INIT_LIST_HEAD(&vpath->vpath_handles);
-
- vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
- hldev->tim_int_mask1, vp_id);
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- __vxge_hw_vp_terminate(hldev, vp_id);
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_mtu_set - Set MTU.
- * Set new MTU value. For example, to use jumbo frames:
- * vxge_hw_vpath_mtu_set(my_device, 9600);
- */
-enum vxge_hw_status
-vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
- vpath = vp->vpath;
-
- new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
-
- if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
- status = VXGE_HW_ERR_INVALID_MTU_SIZE;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
- val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-
- vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-exit:
- return status;
-}
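A hedged usage sketch of the call above, matching the jumbo-frame example in its comment (the 9000-byte value is illustrative and must stay within the vpath's max_mtu):

	static enum vxge_hw_status
	set_jumbo_mtu(struct __vxge_hw_vpath_handle *vp)
	{
		/* returns e.g. VXGE_HW_ERR_INVALID_MTU_SIZE when out of range */
		return vxge_hw_vpath_mtu_set(vp, 9000);
	}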
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
- * Enable the DMA vpath statistics. The function is called to re-enable
- * the adapter to update stats into the host memory.
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- memcpy(vpath->hw_stats_sav, vpath->hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from block pool or from the system
- */
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (size == blockpool->block_size) {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- blockpool->pool_size--;
- }
- }
-
- if (entry != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
-
- return entry;
-}
-
-/*
- * vxge_hw_vpath_open - Open a virtual path on a given adapter
- * This function is used to open access to a virtual path of an
- * adapter for offload and GRO operations. It returns synchronously.
- */
-enum vxge_hw_status
-vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct __vxge_hw_vpath_handle *vp;
- enum vxge_hw_status status;
-
- vpath = &hldev->virtual_paths[attr->vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_OPEN) {
- status = VXGE_HW_ERR_INVALID_STATE;
- goto vpath_open_exit1;
- }
-
- status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
- &hldev->config.vp_config[attr->vp_id]);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit1;
-
- vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
- if (vp == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit2;
- }
-
- vp->vpath = vpath;
-
- if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
- status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit6;
- }
-
- if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
- status = __vxge_hw_ring_create(vp, &attr->ring_attr);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit7;
-
- __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
- }
-
- vpath->fifoh->tx_intr_num =
- (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
- VXGE_HW_VPATH_INTR_TX;
-
- vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
- VXGE_HW_BLOCK_SIZE);
- if (vpath->stats_block == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto vpath_open_exit8;
- }
-
- vpath->hw_stats = vpath->stats_block->memblock;
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
- vpath->hw_stats;
-
- vpath->hw_stats_sav =
- &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
- if (status != VXGE_HW_OK)
- goto vpath_open_exit8;
-
- list_add(&vp->item, &vpath->vpath_handles);
-
- hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
-
- *vpath_handle = vp;
-
- attr->fifo_attr.userdata = vpath->fifoh;
- attr->ring_attr.userdata = vpath->ringh;
-
- return VXGE_HW_OK;
-
-vpath_open_exit8:
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-vpath_open_exit7:
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-vpath_open_exit6:
- vfree(vp);
-vpath_open_exit2:
- __vxge_hw_vp_terminate(hldev, attr->vp_id);
-vpath_open_exit1:
-
- return status;
-}
-
-/**
- * vxge_hw_vpath_rx_doorbell_init - Program the initial receive doorbell
- * @vp: Handle got from previous vpath open
- *
- * This function posts the initial RxD count to the PRC doorbell and
- * lowers the ring's rxds_limit accordingly.
- */
-void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct __vxge_hw_ring *ring = vpath->ringh;
- struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
- u64 new_count, val64, val164;
-
- if (vdev->titan1) {
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- } else
- new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
-
- val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
-
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
- &vpath->vp_reg->prc_rxd_doorbell);
- readl(&vpath->vp_reg->prc_rxd_doorbell);
-
- val164 /= 2;
- val64 = readq(&vpath->vp_reg->prc_cfg6);
- val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
- val64 &= 0x1ff;
-
- /*
- * Each RxD is of 4 qwords
- */
- new_count -= (val64 + 1);
- val64 = min(val164, new_count) / 4;
-
- ring->rxds_limit = min(ring->rxds_limit, val64);
- if (ring->rxds_limit < 4)
- ring->rxds_limit = 4;
-}
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: HAL device
- * @entry: Entry of block to be freed
- *
- * This function frees a block back to the block pool.
- */
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
- struct __vxge_hw_blockpool_entry *entry)
-{
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (entry->length == blockpool->block_size) {
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- __vxge_hw_blockpool_blocks_remove(blockpool);
-}
-
-/*
- * vxge_hw_vpath_close - Close the handle got from a previous vpath open
- * This function is used to close access to a virtual path opened
- * earlier.
- */
-enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- struct __vxge_hw_device *devh = NULL;
- u32 vp_id = vp->vpath->vp_id;
- u32 is_empty = TRUE;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- vpath = vp->vpath;
- devh = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_close_exit;
- }
-
- list_del(&vp->item);
-
- if (!list_empty(&vpath->vpath_handles)) {
- list_add(&vp->item, &vpath->vpath_handles);
- is_empty = FALSE;
- }
-
- if (!is_empty) {
- status = VXGE_HW_FAIL;
- goto vpath_close_exit;
- }
-
- devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
-
- if (vpath->ringh != NULL)
- __vxge_hw_ring_delete(vp);
-
- if (vpath->fifoh != NULL)
- __vxge_hw_fifo_delete(vp);
-
- if (vpath->stats_block != NULL)
- __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
-
- vfree(vp);
-
- __vxge_hw_vp_terminate(devh, vp_id);
-
-vpath_close_exit:
- return status;
-}
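A minimal open/close sketch against the two entry points above (not from the driver; a real caller also fills in attr.fifo_attr and attr.ring_attr, and vpath 0 is only an example id):

	static enum vxge_hw_status
	open_and_close_vpath0(struct __vxge_hw_device *hldev)
	{
		struct vxge_hw_vpath_attr attr = { .vp_id = 0 };
		struct __vxge_hw_vpath_handle *vp;
		enum vxge_hw_status status;

		status = vxge_hw_vpath_open(hldev, &attr, &vp);
		if (status != VXGE_HW_OK)
			return status;

		return vxge_hw_vpath_close(vp);
	}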
-
-/*
- * vxge_hw_vpath_reset - Resets vpath
- * This function is used to request a reset of vpath
- */
-enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status;
- u32 vp_id;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
-
- vp_id = vpath->vp_id;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
- if (status == VXGE_HW_OK)
- vpath->sw_stats->soft_reset_cnt++;
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
- * This function polls for vpath reset completion and re-initializes
- * the vpath.
- */
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath = NULL;
- enum vxge_hw_status status;
- struct __vxge_hw_device *hldev;
- u32 vp_id;
-
- vp_id = vp->vpath->vp_id;
- vpath = vp->vpath;
- hldev = vpath->hldev;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- status = __vxge_hw_vpath_reset_check(vpath);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_initialize(hldev, vp_id);
- if (status != VXGE_HW_OK)
- goto exit;
-
- if (vpath->ringh != NULL)
- __vxge_hw_vpath_prc_configure(hldev, vp_id);
-
- memset(vpath->hw_stats, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- memset(vpath->hw_stats_sav, 0,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- writeq(vpath->stats_block->dma_addr,
- &vpath->vp_reg->stats_cfg);
-
- status = vxge_hw_vpath_stats_enable(vp);
-
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_enable - Enable vpath.
- * This routine clears the vpath reset thereby enabling a vpath
- * to start forwarding frames and generating interrupts.
- */
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_device *hldev;
- u64 val64;
-
- hldev = vp->vpath->hldev;
-
- val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
- 1 << (16 - vp->vpath->vp_id));
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->cmn_rsthdlr_cfg1);
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
deleted file mode 100644
index 0cd0750484ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ /dev/null
@@ -1,2086 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_CONFIG_H
-#define VXGE_CONFIG_H
-#include <linux/hardirq.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-
-#ifndef VXGE_CACHE_LINE_SIZE
-#define VXGE_CACHE_LINE_SIZE 128
-#endif
-
-#ifndef VXGE_ALIGN
-#define VXGE_ALIGN(adrs, size) \
- (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
-#endif
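For illustration, VXGE_ALIGN() yields the number of padding bytes needed to bring an address up to the next size boundary (size must be a power of two): VXGE_ALIGN(0x1005, 8) evaluates to 3, while VXGE_ALIGN(0x1000, 8) evaluates to 0 because the address is already aligned.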
-
-#define VXGE_HW_MIN_MTU ETH_MIN_MTU
-#define VXGE_HW_MAX_MTU 9600
-#define VXGE_HW_DEFAULT_MTU 1500
-
-#define VXGE_HW_MAX_ROM_IMAGES 8
-
-struct eprom_image {
- u8 is_valid:1;
- u8 index;
- u8 type;
- u16 version;
-};
-
-#ifdef VXGE_DEBUG_ASSERT
-/**
- * vxge_assert
- * @test: C-condition to check
- *
- * This macro implements a traditional assert. By default assertions
- * are enabled. They can be disabled by undefining the VXGE_DEBUG_ASSERT
- * macro at compilation time.
- */
-#define vxge_assert(test) BUG_ON(!(test))
-#else
-#define vxge_assert(test)
-#endif /* end of VXGE_DEBUG_ASSERT */
-
-/**
- * enum vxge_debug_level
- * @VXGE_NONE: debug disabled
- * @VXGE_ERR: all errors are logged
- * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
- * are logged. Very noisy.
- *
- * This enumeration is used to switch between different debug levels
- * at runtime if the DEBUG macro is defined during compilation. If the
- * DEBUG macro is not defined, the code is compiled out.
- */
-enum vxge_debug_level {
- VXGE_NONE = 0,
- VXGE_TRACE = 1,
- VXGE_ERR = 2
-};
-
-#define NULL_VPID 0xFFFFFFFF
-#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
-#define VXGE_DEBUG_MODULE_MASK 0xffffffff
-#define VXGE_DEBUG_TRACE_MASK 0xffffffff
-#define VXGE_DEBUG_ERR_MASK 0xffffffff
-#define VXGE_DEBUG_MASK 0x000001ff
-#else
-#define VXGE_DEBUG_MODULE_MASK 0x20000000
-#define VXGE_DEBUG_TRACE_MASK 0x20000000
-#define VXGE_DEBUG_ERR_MASK 0x20000000
-#define VXGE_DEBUG_MASK 0x00000001
-#endif
-
-/*
- * @VXGE_COMPONENT_LL: do debug for vxge link layer module
- * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
- *
- * This enumeration is used to distinguish modules or libraries at
- * compilation and run time. The Makefile must declare the
- * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
- */
-#define VXGE_COMPONENT_LL 0x20000000
-#define VXGE_COMPONENT_ALL 0xffffffff
-
-#define VXGE_HW_BASE_INF 100
-#define VXGE_HW_BASE_ERR 200
-#define VXGE_HW_BASE_BADCFG 300
-
-enum vxge_hw_status {
- VXGE_HW_OK = 0,
- VXGE_HW_FAIL = 1,
- VXGE_HW_PENDING = 2,
- VXGE_HW_COMPLETIONS_REMAIN = 3,
-
- VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
- VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
-
- VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
- VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
- VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
- VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
- VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
- VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
- VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
- VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
- VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
- VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
- VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
- VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
- VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
- VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
- VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
- VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
- VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17,
- VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
- VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
- VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
- VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
- VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
-
- VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
- VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
- VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
- VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
- VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
- VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
- VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
-
- VXGE_HW_EOF_TRACE_BUF = -1
-};
-
-/**
- * enum vxge_hw_device_link_state - Link state enumeration.
- * @VXGE_HW_LINK_NONE: Invalid link state.
- * @VXGE_HW_LINK_DOWN: Link is down.
- * @VXGE_HW_LINK_UP: Link is up.
- *
- */
-enum vxge_hw_device_link_state {
- VXGE_HW_LINK_NONE,
- VXGE_HW_LINK_DOWN,
- VXGE_HW_LINK_UP
-};
-
-/**
- * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
- * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
- * @VXGE_HW_FW_UPGRADE_DONE: upload completed
- * @VXGE_HW_FW_UPGRADE_ERR: upload error
- * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
- *
- */
-enum vxge_hw_fw_upgrade_code {
- VXGE_HW_FW_UPGRADE_OK = 0,
- VXGE_HW_FW_UPGRADE_DONE = 1,
- VXGE_HW_FW_UPGRADE_ERR = 2,
- VXGE_FW_UPGRADE_BYTES2SKIP = 3
-};
-
-/**
- * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
- * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
- * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
- * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image; check failed
- */
-enum vxge_hw_fw_upgrade_err_code {
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
- VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
- VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
- VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
- VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
- VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
-};
-
-/**
- * struct vxge_hw_device_date - Date Format
- * @day: Day
- * @month: Month
- * @year: Year
- * @date: Date in string format
- *
- * Structure for returning date
- */
-
-#define VXGE_HW_FW_STRLEN 32
-struct vxge_hw_device_date {
- u32 day;
- u32 month;
- u32 year;
- char date[VXGE_HW_FW_STRLEN];
-};
-
-struct vxge_hw_device_version {
- u32 major;
- u32 minor;
- u32 build;
- char version[VXGE_HW_FW_STRLEN];
-};
-
-/**
- * struct vxge_hw_fifo_config - Configuration of fifo.
- * @enable: Is this fifo to be commissioned
- * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
- * blocks per queue.
- * @max_frags: Max number of Tx buffers per TxDL (that is, per single
- * transmit operation).
- * No more than 256 transmit buffers can be specified.
- * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
- * bytes. Setting @memblock_size to page size ensures
- * by-page allocation of descriptors. 128K bytes is the
- * maximum supported block size.
- * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
- * (e.g., to align on a cache line).
- * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
- * Use 0 otherwise.
- * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
- * which generally improves latency of the host bridge operation
- * (see PCI specification). For valid values please refer
- * to struct vxge_hw_fifo_config{} in the driver sources.
- * Configuration of all Titan fifos.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_fifo_config{} structure.
- */
-struct vxge_hw_fifo_config {
- u32 enable;
-#define VXGE_HW_FIFO_ENABLE 1
-#define VXGE_HW_FIFO_DISABLE 0
-
- u32 fifo_blocks;
-#define VXGE_HW_MIN_FIFO_BLOCKS 2
-#define VXGE_HW_MAX_FIFO_BLOCKS 128
-
- u32 max_frags;
-#define VXGE_HW_MIN_FIFO_FRAGS 1
-#define VXGE_HW_MAX_FIFO_FRAGS 256
-
- u32 memblock_size;
-#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
-#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
-#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
-
- u32 alignment_size;
-#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
-#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
-#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
-
- u32 intr;
-#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
-#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
-#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
-
- u32 no_snoop_bits;
-#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
-#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
-#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
-#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
-#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
-
-};
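A hedged example of filling this structure, using only the macros defined above (the particular choices are illustrative defaults, not a recommended board configuration):

	static const struct vxge_hw_fifo_config example_fifo_config = {
		.enable		= VXGE_HW_FIFO_ENABLE,
		.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
		.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
		.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
		.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
		.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
		.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
	};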
-/**
- * struct vxge_hw_ring_config - Ring configurations.
- * @enable: Is this ring to be commissioned
- * @ring_blocks: Numbers of RxD blocks in the ring
- * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
- * to Titan User Guide.
- * @scatter_mode: Titan supports two receive scatter modes: A and B.
- * For details please refer to Titan User Guide.
- * @rx_timer_val: The number of 32ns periods that would be counted between two
- * timer interrupts.
- * @greedy_return: If set, forces the device to return absolutely all RxDs
- * that are consumed and still on board when a timer interrupt
- * triggers. If clear, and the device has already returned RxDs
- * between the previous and the current timer interrupt, then the
- * device is not forced to return the rest of the consumed RxDs
- * that it has on board which account for a byte count less than
- * the one programmed into the PRC_CFG6.RXD_CRXDT field.
- * @rx_timer_ci: TBD
- * @backoff_interval_us: Time (in microseconds) after which Titan
- * tries to download RxDs posted by the host.
- * Note that the "backoff" does not happen if the host posts receive
- * descriptors in a timely fashion.
- * Ring configuration.
- */
-struct vxge_hw_ring_config {
- u32 enable;
-#define VXGE_HW_RING_ENABLE 1
-#define VXGE_HW_RING_DISABLE 0
-#define VXGE_HW_RING_DEFAULT 1
-
- u32 ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS 1
-#define VXGE_HW_MAX_RING_BLOCKS 128
-#define VXGE_HW_DEF_RING_BLOCKS 2
-
- u32 buffer_mode;
-#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
-#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
-#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
-#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
-
- u32 scatter_mode;
-#define VXGE_HW_RING_SCATTER_MODE_A 0
-#define VXGE_HW_RING_SCATTER_MODE_B 1
-#define VXGE_HW_RING_SCATTER_MODE_C 2
-#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
-
- u64 rxds_limit;
-#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
-};
-
-/**
- * struct vxge_hw_vp_config - Configuration of virtual path
- * @vp_id: Virtual Path Id
- * @min_bandwidth: Minimum Guaranteed bandwidth
- * @ring: See struct vxge_hw_ring_config{}.
- * @fifo: See struct vxge_hw_fifo_config{}.
- * @tti: Configuration of interrupt associated with Transmit.
- * see struct vxge_hw_tim_intr_config();
- * @rti: Configuration of interrupt associated with Receive.
- * see struct vxge_hw_tim_intr_config();
- * @mtu: mtu size used on this port.
- * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
- * remove the VLAN tag from all received tagged frames that are not
- * replicated at the internal L2 switch.
- * 0 - Do not strip the VLAN tag.
- * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
- * always placed into the RxDMA descriptor.
- *
- * This structure is used by the driver to pass the configuration parameters to
- * configure Virtual Path.
- */
-struct vxge_hw_vp_config {
- u32 vp_id;
-
-#define VXGE_HW_VPATH_PRIORITY_MIN 0
-#define VXGE_HW_VPATH_PRIORITY_MAX 16
-#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
-
- u32 min_bandwidth;
-#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
-#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
-#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
-
- struct vxge_hw_ring_config ring;
- struct vxge_hw_fifo_config fifo;
- struct vxge_hw_tim_intr_config tti;
- struct vxge_hw_tim_intr_config rti;
-
- u32 mtu;
-#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
-#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
-#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
-
- u32 rpa_strip_vlan_tag;
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
-#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
-
-};
-/**
- * struct vxge_hw_device_config - Device configuration.
- * @dma_blockpool_initial: Initial size of DMA Pool
- * @dma_blockpool_max: Maximum blocks in DMA pool
- * @intr_mode: Line, or MSI-X interrupt.
- *
- * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
- * @rth_it_type: RTH IT table programming type
- * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
- * @vp_config: Configuration for virtual paths
- * @device_poll_millis: Specify the interval (in milliseconds)
- * to wait for register reads
- *
- * Titan configuration.
- * Contains per-device configuration parameters, including:
- * - stats sampling interval, etc.
- *
- * In addition, struct vxge_hw_device_config{} includes "subordinate"
- * configurations, including:
- * - fifos and rings;
- * - MAC (done at firmware level).
- *
- * See Titan User Guide for more details.
- * Note: Valid (min, max) range for each attribute is specified in the body of
- * the struct vxge_hw_device_config{} structure. Please refer to the
- * corresponding include file.
- * See also: struct vxge_hw_tim_intr_config{}.
- */
-struct vxge_hw_device_config {
- u32 device_poll_millis;
-#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
-#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
-#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
-
- u32 dma_blockpool_initial;
- u32 dma_blockpool_max;
-#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
-#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
-
-#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
-
- u32 intr_mode:2,
-#define VXGE_HW_INTR_MODE_IRQLINE 0
-#define VXGE_HW_INTR_MODE_MSIX 1
-#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
-
-#define VXGE_HW_INTR_MODE_DEF 0
-
- rth_en:1,
-#define VXGE_HW_RTH_DISABLE 0
-#define VXGE_HW_RTH_ENABLE 1
-#define VXGE_HW_RTH_DEFAULT 0
-
- rth_it_type:1,
-#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
-#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
-#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
-
- rts_mac_en:1,
-#define VXGE_HW_RTS_MAC_DISABLE 0
-#define VXGE_HW_RTS_MAC_ENABLE 1
-#define VXGE_HW_RTS_MAC_DEFAULT 0
-
- hwts_en:1;
-#define VXGE_HW_HWTS_DISABLE 0
-#define VXGE_HW_HWTS_ENABLE 1
-#define VXGE_HW_HWTS_DEFAULT 1
-
- struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * function vxge_uld_link_up_f - Link-Up callback provided by driver.
- * @devh: HW device handle.
- * Link-up notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_link_down_f - Link-Down callback provided by
- * driver.
- * @devh: HW device handle.
- *
- * Link-Down notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * function vxge_uld_crit_err_f - Critical Error notification callback.
- * @devh: HW device handle.
- * (typically at HW device initialization time).
- * @type: Enumerated hw error, e.g.: double ECC.
- * @serr_data: Titan status.
- * @ext_data: Extended data. The contents depend on the @type.
- *
- * Critical error notification callback provided by the driver.
- * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
- *
- * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
- * vxge_hw_driver_initialize().
- */
-
-/**
- * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
- * @link_up: See vxge_uld_link_up_f{}.
- * @link_down: See vxge_uld_link_down_f{}.
- * @crit_err: See vxge_uld_crit_err_f{}.
- *
- * Driver slow-path (per-driver) callbacks.
- * Implemented by driver and provided to HW via
- * vxge_hw_driver_initialize().
- * Note that these callbacks are not mandatory: HW will not invoke
- * a callback if NULL is specified.
- *
- * See also: vxge_hw_driver_initialize().
- */
-struct vxge_hw_uld_cbs {
- void (*link_up)(struct __vxge_hw_device *devh);
- void (*link_down)(struct __vxge_hw_device *devh);
- void (*crit_err)(struct __vxge_hw_device *devh,
- enum vxge_hw_event type, u64 ext_data);
-};
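
A minimal sketch of how a driver could supply these optional slow-path
callbacks (illustrative only; the my_* function names and bodies are
placeholders, not taken from the original driver):

static void my_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. mark the carrier as up on the associated net_device */
}

static void my_link_down(struct __vxge_hw_device *devh)
{
	/* e.g. mark the carrier as down and stop the TX queues */
}

static void my_crit_err(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data)
{
	/* log the error type/ext_data and schedule a recovery/reset */
}

static const struct vxge_hw_uld_cbs my_uld_cbs = {
	.link_up   = my_link_up,
	.link_down = my_link_down,
	.crit_err  = my_crit_err,
};

The structure would then be referenced from struct vxge_hw_device_attr
(->uld_callbacks) before device initialization.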
-
-/*
- * struct __vxge_hw_blockpool_entry - Block private data structure
- * @item: List header used to link.
- * @length: Length of the block
- * @memblock: Virtual address block
- * @dma_addr: DMA Address of the block.
- * @dma_handle: DMA handle of the block.
- * @acc_handle: DMA acc handle
- *
- * Each block is allocated with a header used to link it into a list.
- *
- */
-struct __vxge_hw_blockpool_entry {
- struct list_head item;
- u32 length;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * struct __vxge_hw_blockpool - Block Pool
- * @hldev: HW device
- * @block_size: size of each block.
- * @pool_size: Number of blocks in the pool
- * @pool_max: Maximum number of blocks above which to free additional blocks
- * @req_out: Number of block requests outstanding with the OS
- * @free_block_list: List of free blocks
- *
- * Block pool contains the DMA blocks preallocated.
- *
- */
-struct __vxge_hw_blockpool {
- struct __vxge_hw_device *hldev;
- u32 block_size;
- u32 pool_size;
- u32 pool_max;
- u32 req_out;
- struct list_head free_block_list;
- struct list_head free_entry_list;
-};
-
-/*
- * enum __vxge_hw_channel_type - Enumerated channel types.
- * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
- * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
- * @VXGE_HW_CHANNEL_TYPE_RING: ring.
- * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
- * (and recognized) channel types. Currently: 2.
- *
- * Enumerated channel types. Currently there are only two link-layer
- * channels - Titan fifo and Titan ring. In the future the list will grow.
- */
-enum __vxge_hw_channel_type {
- VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
- VXGE_HW_CHANNEL_TYPE_FIFO = 1,
- VXGE_HW_CHANNEL_TYPE_RING = 2,
- VXGE_HW_CHANNEL_TYPE_MAX = 3
-};
-
-/*
- * struct __vxge_hw_channel
- * @item: List item; used to maintain a list of open channels.
- * @type: Channel type. See enum vxge_hw_channel_type{}.
- * @devh: Device handle. HW device object that contains _this_ channel.
- * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
- * @length: Channel length. Currently allocated number of descriptors.
- * The channel length "grows" when more descriptors get allocated.
- * See _hw_mempool_grow.
- * @reserve_arr: Reserve array. Contains descriptors that can be reserved
- * by driver for the subsequent send or receive operation.
- * See vxge_hw_fifo_txdl_reserve(),
- * vxge_hw_ring_rxd_reserve().
- * @reserve_ptr: Current pointer in the reserve array
- * @reserve_top: Reserve top gives the maximum number of descriptors
- * available in the reserve array.
- * @work_arr: Work array. Contains descriptors posted to the channel.
- * Note that at any point in time @work_arr contains 3 types of
- * descriptors:
- * 1) posted but not yet consumed by Titan device;
- * 2) consumed but not yet completed;
- * 3) completed but not yet freed
- * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
- * @post_index: Post index. At any point in time points to the
- * position in the channel that will contain the next to-be-posted
- * descriptor.
- * @compl_index: Completion index. At any point in time points to the
- * position in the channel that will contain the next
- * to-be-completed descriptor.
- * @free_arr: Free array. Contains completed descriptors that were freed
- * (i.e., handed over back to HW) by driver.
- * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
- * @free_ptr: current pointer in free array
- * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
- * to store per-operation control information.
- * @stats: Pointer to common statistics
- * @userdata: Per-channel opaque (void*) user-defined context, which may be
- * driver object, ULP connection, etc.
- * Once channel is open, @userdata is passed back to user via
- * vxge_hw_channel_callback_f.
- *
- * HW channel object.
- *
- * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
- */
-struct __vxge_hw_channel {
- struct list_head item;
- enum __vxge_hw_channel_type type;
- struct __vxge_hw_device *devh;
- struct __vxge_hw_vpath_handle *vph;
- u32 length;
- u32 vp_id;
- void **reserve_arr;
- u32 reserve_ptr;
- u32 reserve_top;
- void **work_arr;
- u32 post_index ____cacheline_aligned;
- u32 compl_index ____cacheline_aligned;
- void **free_arr;
- u32 free_ptr;
- void **orig_arr;
- u32 per_dtr_space;
- void *userdata;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 first_vp_id;
- struct vxge_hw_vpath_stats_sw_common_info *stats;
-
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_virtualpath - Virtual Path
- *
- * @vp_id: Virtual path id
- * @vp_open: This flag specifies whether vxge_hw_vp_open has been called by the LL driver
- * @hldev: Hal device
- * @vp_config: Virtual Path Config
- * @vp_reg: VPATH Register map address in BAR0
- * @vpmgmt_reg: VPATH_MGMT register map address
- * @max_mtu: Max mtu that can be supported
- * @vsport_number: vsport attached to this vpath
- * @max_kdfc_db: Maximum kernel mode doorbells
- * @max_nofl_db: Maximum non offload doorbells
- * @tx_intr_num: Interrupt Number associated with the TX
- *
- * @ringh: Ring Queue
- * @fifoh: FIFO Queue
- * @vpath_handles: Virtual Path handles list
- * @stats_block: Memory for DMAing stats
- * @stats: Vpath statistics
- *
- * Virtual path structure to encapsulate the data related to a virtual path.
- * Virtual paths are allocated by the HW upon getting configuration from the
- * driver and inserted into the list of virtual paths.
- */
-struct __vxge_hw_virtualpath {
- u32 vp_id;
-
- u32 vp_open;
-#define VXGE_HW_VP_NOT_OPEN 0
-#define VXGE_HW_VP_OPEN 1
-
- struct __vxge_hw_device *hldev;
- struct vxge_hw_vp_config *vp_config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
-
- u32 max_mtu;
- u32 vsport_number;
- u32 max_kdfc_db;
- u32 max_nofl_db;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- struct __vxge_hw_ring *____cacheline_aligned ringh;
- struct __vxge_hw_fifo *____cacheline_aligned fifoh;
- struct list_head vpath_handles;
- struct __vxge_hw_blockpool_entry *stats_block;
- struct vxge_hw_vpath_stats_hw_info *hw_stats;
- struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- spinlock_t lock;
-};
-
-/*
- * struct __vxge_hw_vpath_handle - List item to store callback information
- * @item: List head to keep the item in linked list
- * @vpath: Virtual path to which this item belongs
- *
- * This structure is used to store the callback information.
- */
-struct __vxge_hw_vpath_handle {
- struct list_head item;
- struct __vxge_hw_virtualpath *vpath;
-};
-
-/*
- * struct __vxge_hw_device
- *
- * HW device object.
- */
-/**
- * struct __vxge_hw_device - Hal device object
- * @magic: Magic Number
- * @bar0: BAR0 virtual address.
- * @pdev: Physical device handle
- * @config: Configuration passed by the LL driver at initialization
- * @link_state: Link state
- *
- * HW device object. Represents Titan adapter
- */
-struct __vxge_hw_device {
- u32 magic;
-#define VXGE_HW_DEVICE_MAGIC 0x12345678
-#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- void __iomem *bar0;
- struct pci_dev *pdev;
- struct net_device *ndev;
- struct vxge_hw_device_config config;
- enum vxge_hw_device_link_state link_state;
-
- const struct vxge_hw_uld_cbs *uld_callbacks;
-
- u32 host_type;
- u32 func_id;
- u32 access_rights;
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
-#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
- struct vxge_hw_legacy_reg __iomem *legacy_reg;
- struct vxge_hw_toc_reg __iomem *toc_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
- struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
- [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
- [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
- struct vxge_hw_vpath_reg __iomem *vpath_reg \
- [VXGE_HW_TITAN_VPATH_REG_SPACES];
- u8 __iomem *kdfc;
- u8 __iomem *usdc;
- struct __vxge_hw_virtualpath virtual_paths \
- [VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpath_assignments;
- u64 vpaths_deployed;
- u32 first_vp_id;
- u64 tim_int_mask0[4];
- u32 tim_int_mask1[4];
-
- struct __vxge_hw_blockpool block_pool;
- struct vxge_hw_device_stats stats;
- u32 debug_module_mask;
- u32 debug_level;
- u32 level_err;
- u32 level_trace;
- u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
-};
-
-#define VXGE_HW_INFO_LEN 64
-/**
- * struct vxge_hw_device_hw_info - Device information
- * @host_type: Host Type
- * @func_id: Function Id
- * @vpath_mask: vpath bit mask
- * @fw_version: Firmware version
- * @fw_date: Firmware Date
- * @flash_version: Flash firmware version
- * @flash_date: Flash firmware date
- * @mac_addrs: Mac addresses for each vpath
- * @mac_addr_masks: Mac address masks for each vpath
- *
- * Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver and the first mac address for each vpath
- */
-struct vxge_hw_device_hw_info {
- u32 host_type;
-#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
-#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
-#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
-#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
-#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
-#define VXGE_HW_SR_VH_FUNCTION0 5
-#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
-#define VXGE_HW_VH_NORMAL_FUNCTION 7
- u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
-#define VXGE_HW_FUNCTION_MODE_SRIOV 2
-#define VXGE_HW_FUNCTION_MODE_MRIOV 3
-#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
-#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
-#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
-#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
-
- u32 func_id;
- u64 vpath_mask;
- struct vxge_hw_device_version fw_version;
- struct vxge_hw_device_date fw_date;
- struct vxge_hw_device_version flash_version;
- struct vxge_hw_device_date flash_date;
- u8 serial_number[VXGE_HW_INFO_LEN];
- u8 part_number[VXGE_HW_INFO_LEN];
- u8 product_desc[VXGE_HW_INFO_LEN];
- u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
- u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
-};
-
-/**
- * struct vxge_hw_device_attr - Device memory spaces.
- * @bar0: BAR0 virtual address.
- * @pdev: PCI device object.
- *
- * Device memory spaces. Includes configuration, BAR0, and other per-device
- * mapped memories. Also includes a pointer to the OS-specific PCI device object.
- */
-struct vxge_hw_device_attr {
- void __iomem *bar0;
- struct pci_dev *pdev;
- const struct vxge_hw_uld_cbs *uld_callbacks;
-};
-
-#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
- m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0x80000000; \
- m1[1] = 0x40000000; \
- } \
-}
-
-#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
- if (i < 16) { \
- m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
- m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
- } \
- else { \
- m1[0] = 0; \
- m1[1] = 0; \
- } \
-}
-
-#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
- status = vxge_hw_mrpcim_stats_access(hldev, \
- VXGE_HW_STATS_OP_READ, \
- loc, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
-/*
- * struct __vxge_hw_ring - Ring channel.
- * @channel: Channel "base" of this ring, the common part of all HW
- * channels.
- * @mempool: Memory pool, the pool from which descriptors get allocated.
- * (See vxge_hw_mm.h).
- * @config: Ring configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @ring_length: Length of the ring
- * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
- * as per Titan User Guide.
- * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
- * 1-buffer mode descriptor is 32 bytes long, etc.
- * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
- * per-descriptor data (e.g., DMA handle for Solaris)
- * @per_rxd_space: Per rxd space requested by driver
- * @rxds_per_block: Number of descriptors per hardware-defined RxD
- * block. Depends on the (1-, 3-, 5-) buffer mode.
- * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
- * usage. Not to be confused with @rxd_priv_size.
- * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
- * @callback: Channel completion callback. HW invokes the callback when there
- * are new completions on that channel. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Channel's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding channel.
- * See also vxge_hw_channel_rxd_term_f{}.
- * @stats: Statistics for ring
- * Ring channel.
- *
- * Note: The structure is cache line aligned to better utilize
- * CPU cache performance.
- */
-struct __vxge_hw_ring {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_common_reg __iomem *common_reg;
- u32 ring_length;
- u32 buffer_mode;
- u32 rxd_size;
- u32 rxd_priv_size;
- u32 per_rxd_space;
- u32 rxds_per_block;
- u32 rxdblock_priv_size;
- u32 cmpl_cnt;
- u32 vp_id;
- u32 doorbell_cnt;
- u32 total_db_cnt;
- u64 rxds_limit;
- u32 rtimer;
- u64 tim_rti_cfg1_saved;
- u64 tim_rti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
- struct vxge_hw_ring_config *config;
-} ____cacheline_aligned;
-
-/**
- * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
- * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
- * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_txdl_state {
- VXGE_HW_TXDL_STATE_NONE = 0,
- VXGE_HW_TXDL_STATE_AVAIL = 1,
- VXGE_HW_TXDL_STATE_POSTED = 2,
- VXGE_HW_TXDL_STATE_FREED = 3
-};
-/*
- * struct __vxge_hw_fifo - Fifo.
- * @channel: Channel "base" of this fifo, the common part of all HW
- * channels.
- * @mempool: Memory pool, from which descriptors get allocated.
- * @config: Fifo configuration, part of device configuration
- * (see struct vxge_hw_device_config{}).
- * @interrupt_type: Interrupt type to be used
- * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
- * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. For details
- * on TxDLs please refer to the Titan UG.
- * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
- * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
- * @priv_size: Per-Tx descriptor space reserved for driver
- * usage.
- * @per_txdl_space: Per txdl private space for the driver
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @stats: Statistics of this fifo
- *
- * Fifo channel.
- * Note: The structure is cache line aligned.
- */
-struct __vxge_hw_fifo {
- struct __vxge_hw_channel channel;
- struct vxge_hw_mempool *mempool;
- struct vxge_hw_fifo_config *config;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
- u64 interrupt_type;
- u32 no_snoop_bits;
- u32 txdl_per_memblock;
- u32 txdl_size;
- u32 priv_size;
- u32 per_txdl_space;
- u32 vp_id;
- u32 tx_intr_num;
- u32 rtimer;
- u64 tim_tti_cfg1_saved;
- u64 tim_tti_cfg3_saved;
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb,
- int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
-} ____cacheline_aligned;
-
-/*
- * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
- * Each memblock is a contiguous block of DMA-able memory.
- * @frags: Total number of fragments (that is, contiguous data buffers)
- * carried by this TxDL.
- * @align_vaddr_start: Aligned virtual address start
- * @align_vaddr: Virtual address of the per-TxDL area in memory used for
- * alignment. Used to place one or more misaligned fragments
- * @align_dma_addr: DMA address translated from the @align_vaddr.
- * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
- * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
- * @align_dma_offset: The current offset into the @align_vaddr area.
- * Grows while filling the descriptor, gets reset.
- * @align_used_frags: Number of fragments used.
- * @alloc_frags: Total number of fragments allocated.
- * @unused: TODO
- * @next_txdl_priv: (TODO).
- * @first_txdp: (TODO).
- * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
- * TxDL list.
- * @txdlh: Corresponding txdlh to this TxDL.
- * @memblock: Pointer to the TxDL memory block or memory page used
- * on the next send operation.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
- *
- * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- *
- * See also: struct vxge_hw_ring_rxd_priv{}.
- */
-struct __vxge_hw_fifo_txdl_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
- u32 frags;
- u8 *align_vaddr_start;
- u8 *align_vaddr;
- dma_addr_t align_dma_addr;
- struct pci_dev *align_dma_handle;
- struct pci_dev *align_dma_acch;
- ptrdiff_t align_dma_offset;
- u32 align_used_frags;
- u32 alloc_frags;
- u32 unused;
- struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
- struct vxge_hw_fifo_txd *first_txdp;
- void *memblock;
-};
-
-/*
- * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
- * @control_0: Bits 0 to 7 - Doorbell type.
- * Bits 8 to 31 - Reserved.
- * Bits 32 to 39 - The highest TxD in this TxDL.
- * Bits 40 to 47 - Reserved.
- * Bits 48 to 55 - Reserved.
- * Bits 56 to 63 - No snoop flags.
- * @txdl_ptr: The starting location of the TxDL in host memory.
- *
- * Created by the host and written to the adapter via PIO to a Kernel Doorbell
- * FIFO. All non-offload doorbell wrapper fields must be written by the host as
- * part of a doorbell write. Consumed by the adapter but is not written by the
- * adapter.
- */
-struct __vxge_hw_non_offload_db_wrapper {
- u64 control_0;
-#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
-#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_NODBW_TYPE_NODBW 0
-
-#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
-#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
-
-#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
-#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
-
- u64 txdl_ptr;
-};
-
-/*
- * TX Descriptor
- */
-
-/**
- * struct vxge_hw_fifo_txd - Transmit Descriptor
- * @control_0: Bits 0 to 6 - Reserved.
- * Bit 7 - List Ownership. This field should be initialized
- * to '1' by the driver before the transmit list pointer is
- * written to the adapter. This field will be set to '0' by the
- * adapter once it has completed transmitting the frame or frames in
- * the list. Note - This field is only valid in TxD0. Additionally,
- * for multi-list sequences, the driver should not release any
- * buffers until the ownership of the last list in the multi-list
- * sequence has been returned to the host.
- * Bits 8 to 11 - Reserved
- * Bits 12 to 15 - Transfer_Code. This field is only valid in
- * TxD0. It is used to describe the status of the transmit data
- * buffer transfer. This field is always overwritten by the
- * adapter, so this field may be initialized to any value.
- * Bits 16 to 17 - Host steering. This field allows the host to
- * override the selection of the physical transmit port.
- * Attention:
- * Normal sounds as if learned from the switch rather than from
- * the aggregation algorithms.
- * 00: Normal. Use Destination/MAC Address
- * lookup to determine the transmit port.
- * 01: Send on physical Port1.
- * 10: Send on physical Port0.
- * 11: Send on both ports.
- * Bits 18 to 21 - Reserved
- * Bits 22 to 23 - Gather_Code. This field is set by the host and
- * is used to describe how individual buffers comprise a frame.
- * 10: First descriptor of a frame.
- * 00: Middle of a multi-descriptor frame.
- * 01: Last descriptor of a frame.
- * 11: First and last descriptor of a frame (the entire frame
- * resides in a single buffer).
- * For multi-descriptor frames, the only valid gather code sequence
- * is {10, [00], 01}. In other words, the descriptors must be placed
- * in the list in the correct order.
- * Bits 24 to 27 - Reserved
- * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
- * definition. Only valid in TxD0. This field allows the host to
- * indicate the Ethernet encapsulation of an outbound LSO packet.
- * 00 - classic mode (best guess)
- * 01 - LLC
- * 10 - SNAP
- * 11 - DIX
- * If "classic mode" is selected, the adapter will attempt to
- * decode the frame's Ethernet encapsulation by examining the L/T
- * field as follows:
- * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
- * if packet is IPv4 or IPv6.
- * 0x8870 Jumbo-SNAP encoding.
- * 0x0800 IPv4 DIX encoding
- * 0x86DD IPv6 DIX encoding
- * others illegal encapsulation
- * Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
- * Set to 1 to perform segmentation offload for TCP/UDP.
- * This field is valid only in TxD0.
- * Bits 31 to 33 - Reserved.
- * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
- * This field is meaningful only when LSO_Control is non-zero.
- * When LSO_Control is set to TCP_LSO, the single (possibly large)
- * TCP segment described by this TxDL will be sent as a series of
- * TCP segments each of which contains no more than LSO_MSS
- * payload bytes.
- * When LSO_Control is set to UDP_LSO, the single (possibly large)
- * UDP datagram described by this TxDL will be sent as a series of
- * UDP datagrams each of which contains no more than LSO_MSS
- * payload bytes.
- * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
- * or TCP payload, with the exception of the last, which will have
- * <= LSO_MSS bytes of payload.
- * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
- * buffer to be read by the adapter. This field is written by the
- * host. A value of 0 is illegal.
- * Bits 32 to 63 - This value is written by the adapter upon
- * completion of a UDP or TCP LSO operation and indicates the number
- * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
- * returned for any non-LSO operation.
- * @control_1: Bits 0 to 4 - Reserved.
- * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
- * offload. This field is only valid in the first TxD of a frame.
- * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that TCP is present.
- * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
- * This field is only valid in the first TxD of a frame (the TxD's
- * gather code must be 10 or 11). The driver should only set this
- * bit if it can guarantee that UDP is present.
- * Bits 8 to 14 - Reserved.
- * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
- * instruct the adapter to insert the VLAN tag specified by the
- * Tx_VLAN_Tag field. This field is only valid in the first TxD of
- * a frame.
- * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
- * to be inserted into the frame by the adapter (the first two bytes
- * of a VLAN tag are always 0x8100). This field is only valid if the
- * Tx_VLAN_Enable field is set to '1'.
- * Bits 32 to 33 - Reserved.
- * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
- * number the frame is associated with. This field is written by the
- * host. It is only valid in the first TxD of a frame.
- * Bits 40 to 42 - Reserved.
- * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
- * functions. This field is valid only in the first TxD
- * of a frame.
- * Bits 44 to 45 - Reserved.
- * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
- * generate an interrupt as soon as all of the frames in the list
- * have been transmitted. In order to have per-frame interrupts,
- * the driver should place a maximum of one frame per list. This
- * field is only valid in the first TxD of a frame.
- * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
- * to count the frame toward the utilization interrupt specified in
- * the Tx_Int_Number field. This field is only valid in the first
- * TxD of a frame.
- * Bits 48 to 63 - Reserved.
- * @buffer_pointer: Buffer start address.
- * @host_control: Host_Control. Opaque 64-bit data stored by the driver inside the
- * Titan descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
- * to the driver with each completed descriptor.
- *
- * Transmit descriptor (TxD). Fifo descriptor contains a configured number
- * (list) of TxDs. For more details please refer to Titan User Guide,
- * Section 5.4.2 "Transmit Descriptor (TxD) Format".
- */
-struct vxge_hw_fifo_txd {
- u64 control_0;
-#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
-
-
-#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
-#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
-
-
-#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
-
-#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
-
-#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
-
- u64 control_1;
-#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
-#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
-#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
-#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
-
-#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
-
-#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
-#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
-
- u64 buffer_pointer;
-
- u64 host_control;
-};
-
-/**
- * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
- * @host_control: This field is exclusively for host use and is "readonly"
- * from the adapter's perspective.
- * @control_0: Bits 0 to 6 - RTH_Bucket get
- * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
- * by the host, and is set to 0 by the adapter.
- * 0 - Host owns RxD and buffer.
- * 1 - The adapter owns RxD and buffer.
- * Bit 8 - Fast_Path_Eligible When set, indicates that the
- * received frame meets all of the criteria for fast path processing.
- * The required criteria are as follows:
- * !SYN &
- * (Transfer_Code == "Transfer OK") &
- * (!Is_IP_Fragment) &
- * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
- * (Is_IPv6)) &
- * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
- * (Is_UDP & (computed_L4_checksum == 0xFFFF |
- * computed_L4_checksum == 0x0000)))
- * (same meaning for all RxD buffer modes)
- * Bit 9 - L3 Checksum Correct
- * Bit 10 - L4 Checksum Correct
- * Bit 11 - Reserved
- * Bit 12 to 15 - This field is written by the adapter. It is
- * used to report the status of the frame transfer to the host.
- * 0x0 - Transfer OK
- * 0x4 - RDA Failure During Transfer
- * 0x5 - Unparseable Packet, such as unknown IPv6 header.
- * 0x6 - Frame integrity error (FCS or ECC).
- * 0x7 - Buffer Size Error. The provided buffer(s) were not
- * appropriately sized and data loss occurred.
- * 0x8 - Internal ECC Error. RxD corrupted.
- * 0x9 - IPv4 Checksum error
- * 0xA - TCP/UDP Checksum error
- * 0xF - Unknown Error or Multiple Error. Indicates an
- * unknown problem or that more than one of transfer codes is set.
- * Bit 16 - SYN The adapter sets this field to indicate that
- * the incoming frame contained a TCP segment with its SYN bit
- * set and its ACK bit NOT set. (same meaning for all RxD buffer
- * modes)
- * Bit 17 - Is ICMP
- * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
- * Socket Pair Direct Match Table and the frame was steered based
- * on SPDM.
- * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
- * Indirection Table and the frame was steered based on hash
- * indirection.
- * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
- * type) that was used to calculate the hash.
- * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
- * tagged.
- * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
- * of the received frame.
- * 0x0 - Ethernet DIX
- * 0x1 - LLC
- * 0x2 - SNAP (includes Jumbo-SNAP)
- * 0x3 - IPX
- * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
- * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
- * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
- * IP packet.
- * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
- * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
- * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
- * arrived with the frame. If the resulting computed IPv4 header
- * checksum for the frame did not produce the expected 0xFFFF value,
- * then the transfer code would be set to 0x9.
- * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
- * arrived with the frame. If the resulting computed TCP/UDP checksum
- * for the frame did not produce the expected 0xFFFF value, then the
- * transfer code would be set to 0xA.
- * @control_1: Bits 0 to 1 - Reserved
- * Bits 2 to 15 - Buffer0_Size. This field is set by the host and
- * eventually overwritten by the adapter. The host writes the
- * available buffer size in bytes when it passes the descriptor to
- * the adapter. When a frame is delivered to the host, the adapter
- * populates this field with the number of bytes written into the
- * buffer. The largest supported buffer is 16,383 bytes.
- * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
- * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
- * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
- * of the VLAN tag, if one was detected by the adapter. This field is
- * populated even if VLAN-tag stripping is enabled.
- * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
- *
- * One buffer mode RxD for ring structure
- */
-struct vxge_hw_ring_rxd_1 {
- u64 host_control;
- u64 control_0;
-#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
-
-#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
-
-#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
-
-#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
-#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
-
-#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
-
-#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
-
-#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
-
-#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
-
-#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
-
-#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
-
-#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
-
-#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
-
-#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
-
-#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
-
-#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
-
- u64 control_1;
-
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
-
-#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
-
-#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
-
- u64 buffer0_ptr;
-};
-
-enum vxge_hw_rth_algoritms {
- RTH_ALG_JENKINS = 0,
- RTH_ALG_MS_RSS = 1,
- RTH_ALG_CRC32C = 2
-};
-
-/**
- * struct vxge_hw_rth_hash_types - RTH hash types.
- * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
- * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
- * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
- * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
- * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
- * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
- *
- * Used to pass RTH hash types to rts_rts_set.
- *
- * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
- */
-struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en:1,
- hash_type_ipv4_en:1,
- hash_type_tcpipv6_en:1,
- hash_type_ipv6_en:1,
- hash_type_tcpipv6ex_en:1,
- hash_type_ipv6ex_en:1;
-};
-
-void vxge_hw_device_debug_set(
- struct __vxge_hw_device *devh,
- enum vxge_debug_level level,
- u32 mask);
-
-u32
-vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
-
-u32
-vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-
-/**
- * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
- * @buf_mode: Buffer mode (1, 3 or 5)
- *
- * This function returns the size of RxD for given buffer mode
- */
-static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
-{
- return sizeof(struct vxge_hw_ring_rxd_1);
-}
-
-/**
- * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
- * @buf_mode: Buffer mode (1 buffer mode only)
- *
- * This function returns the number of RxDs per RxD block for the given buffer mode
- */
-static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
-{
- return (u32)((VXGE_HW_BLOCK_SIZE-16) /
- sizeof(struct vxge_hw_ring_rxd_1));
-}
-
-/**
- * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
- * @rxdh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer this descriptor
- * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
- * the receive buffer should be already mapped to the device
- * @size: Size of the receive @dma_pointer buffer.
- *
- * Prepare 1-buffer-mode Rx descriptor for posting
- * (via vxge_hw_ring_rxd_post()).
- *
- * This inline helper-function does not return any parameters and always
- * succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_set(
- void *rxdh,
- dma_addr_t dma_pointer,
- u32 size)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxdp->buffer0_ptr = dma_pointer;
- rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
- rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
-}
-
-/**
- * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
- * descriptor.
- * @vpath_handle: Virtual Path handle.
- * @rxdh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer this descriptor
- * carries. Returned by HW.
- * @pkt_length: Length (in bytes) of the data in the buffer pointed to by
- * this descriptor.
- *
- * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
- * This inline helper-function uses completed descriptor to populate receive
- * buffer pointer and other "out" parameters. The function always succeeds.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u32 *pkt_length)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- *pkt_length =
- (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
-}
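
For illustration, a driver might prepare and complete 1-buffer RxDs as
below; the example_* helpers are hypothetical, and descriptor
reservation/posting (not shown) is assumed to be handled elsewhere:

/* Prepare a reserved RxD; the buffer must already be DMA-mapped. */
static void example_prepare_rxd(void *rxdh, dma_addr_t dma, u32 buf_size)
{
	vxge_hw_ring_rxd_1b_set(rxdh, dma, buf_size);
}

/* On completion, read back how many bytes the adapter wrote. */
static void example_complete_rxd(struct __vxge_hw_ring *ring, void *rxdh)
{
	u32 pkt_length;

	vxge_hw_ring_rxd_1b_get(ring, rxdh, &pkt_length);
	/* pkt_length now holds the received frame length for buffer0 */
}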
-
-/**
- * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
- * a completed receive descriptor for 1b mode.
- * @vpath_handle: Virtual Path handle.
- * @rxdh: Descriptor handle.
- * @rxd_info: Descriptor information
- *
- * Retrieve extended information associated with a completed receive descriptor.
- *
- */
-static inline
-void vxge_hw_ring_rxd_1b_info_get(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- struct vxge_hw_ring_rxd_info *rxd_info)
-{
-
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- rxd_info->syn_flag =
- (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
- rxd_info->is_icmp =
- (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
- rxd_info->fast_path_eligible =
- (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
- rxd_info->l3_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l3_cksum =
- (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
- rxd_info->l4_cksum_valid =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
- rxd_info->l4_cksum =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
- rxd_info->frame =
- (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
- rxd_info->proto =
- (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
- rxd_info->is_vlan =
- (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
- rxd_info->vlan =
- (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
- rxd_info->rth_bucket =
- (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
- rxd_info->rth_it_hit =
- (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
- rxd_info->rth_spdm_hit =
- (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
- rxd_info->rth_hash_type =
- (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
- rxd_info->rth_value =
- (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
-}
-
-/**
- * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
- * of a 1b-mode or 3b-mode ring.
- * @rxdh: Descriptor handle.
- *
- * Returns: private driver info associated with the descriptor.
- * The driver requests per-descriptor space via vxge_hw_ring_attr.
- *
- */
-static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- return (void *)(size_t)rxdp->host_control;
-}
-
-/**
- * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
- * @txdlh: Descriptor handle.
- * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
- * and/or TCP and/or UDP.
- *
- * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
- * descriptor.
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_buffer_set().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
- txdp->control_1 |= cksum_bits;
-}
-
-/**
- * vxge_hw_fifo_txdl_mss_set - Set MSS.
- * @txdlh: Descriptor handle.
- * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
- * driver, which in turn inserts the MSS into the @txdlh.
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
- * and vxge_hw_fifo_txdl_cksum_set_bits().
- * All these APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
-}
-
-/**
- * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
- * @txdlh: Descriptor handle.
- * @vlan_tag: 16bit VLAN tag.
- *
- * Insert VLAN tag into specified transmit descriptor.
- * The actual insertion of the tag into outgoing frame is done by the hardware.
- */
-static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
-}
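
Taken together, these helpers prepare a TxDL before posting. A hedged
sketch (the example_* name is hypothetical; reserving the descriptor,
attaching buffers and posting it are assumed to happen around this call):

static void example_prepare_txdl(void *txdlh, u16 vlan_tag, int mss)
{
	/* Offload IPv4 header and TCP checksum calculation. */
	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
					 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);

	/* Enable LSO with the given MSS. */
	vxge_hw_fifo_txdl_mss_set(txdlh, mss);

	/* Ask the adapter to insert the VLAN tag on transmit. */
	vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
}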
-
-/**
- * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
- * @txdlh: Descriptor handle.
- *
- * Retrieve per-descriptor private data.
- * Note that driver requests per-descriptor space via
- * struct vxge_hw_fifo_attr passed to
- * vxge_hw_vpath_open().
- *
- * Returns: private driver data associated with the descriptor.
- */
-static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
-{
- struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
-
- return (void *)(size_t)txdp->host_control;
-}
-
-/**
- * struct vxge_hw_ring_attr - Ring open "template".
- * @callback: Ring completion callback. HW invokes the callback when there
- * are new completions on that ring. In many implementations
- * the @callback executes in the hw interrupt context.
- * @rxd_init: Ring's descriptor-initialize callback.
- * See vxge_hw_ring_rxd_init_f{}.
- * If not NULL, HW invokes the callback when opening
- * the ring.
- * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding ring.
- * See also vxge_hw_ring_rxd_term_f{}.
- * @userdata: User-defined "context" of _that_ ring. Passed back to the
- * user as one of the @callback, @rxd_init, and @rxd_term arguments.
- * @per_rxd_space: If specified (i.e., greater than zero): extra space
- * reserved by HW for each receive descriptor.
- * Can be used to store, and retrieve on completion,
- * information specific to the driver.
- *
- * Ring open "template". User fills the structure with ring
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_ring_attr {
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_ring *ringh,
- void *rxdh,
- u8 t_code,
- void *userdata);
-
- enum vxge_hw_status (*rxd_init)(
- void *rxdh,
- void *userdata);
-
- void (*rxd_term)(
- void *rxdh,
- enum vxge_hw_rxd_state state,
- void *userdata);
-
- void *userdata;
- u32 per_rxd_space;
-};
-
-/**
- * function vxge_hw_fifo_callback_f - FIFO callback.
- * @vpath_handle: Virtual path whose fifo contains one or more completed
- * descriptors.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @t_code: Transfer code, as per Titan User Guide.
- * Returned by HW.
- * @host_control: Opaque 64bit data stored by driver inside the Titan
- * descriptor prior to posting the latter on the fifo
- * via vxge_hw_fifo_txdl_post(). The @host_control is returned
- * as is to the driver with each completed descriptor.
- * @userdata: Opaque per-fifo data specified at fifo open
- * time, via vxge_hw_vpath_open().
- *
- * Fifo completion callback (type declaration). A single per-fifo
- * callback is specified at fifo open time, via
- * vxge_hw_vpath_open(). Typically gets called as part of the processing
- * of the Interrupt Service Routine.
- *
- * Fifo callback gets called by HW if, and only if, there is at least
- * one new completion on a given fifo. Upon processing the first @txdlh driver
- * is _supposed_ to continue consuming completions using:
- * - vxge_hw_fifo_txdl_next_completed()
- *
- * Note that failure to process new completions in a timely fashion
- * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
- *
- * Non-zero @t_code means failure to process transmit descriptor.
- *
- * In the "transmit" case the failure could happen, for instance, when the
- * link is down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
- */
-/**
- * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
- * @txdlh: First completed descriptor.
- * @txdl_priv: Pointer to per txdl space allocated
- * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
- * @userdata: Per-fifo user data (a.k.a. context) specified at
- * fifo open time, via vxge_hw_vpath_open().
- *
- * Terminate descriptor callback. Unless NULL is specified in the
- * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
- * HW invokes the callback as part of closing fifo, prior to
- * de-allocating the ring and associated data structures
- * (including descriptors).
- * The driver should utilize the callback to (for instance) unmap
- * and free DMA data buffers associated with the posted (state =
- * VXGE_HW_TXDL_STATE_POSTED) descriptors,
- * as well as other relevant cleanup functions.
- *
- * See also: struct vxge_hw_fifo_attr{}
- */
-/**
- * struct vxge_hw_fifo_attr - Fifo open "template".
- * @callback: Fifo completion callback. HW invokes the callback when there
- * are new completions on that fifo. In many implementations
- * the @callback executes in the hw interrupt context.
- * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
- * HW invokes the callback when closing the corresponding fifo.
- * See also vxge_hw_fifo_txdl_term_f{}.
- * @userdata: User-defined "context" of _that_ fifo. Passed back to the
- * user as one of the @callback, and @txdl_term arguments.
- * @per_txdl_space: If specified (i.e., greater than zero): extra space
- * reserved by HW for each transmit descriptor. Can be used to
- * store, and retrieve on completion, information specific
- * to the driver.
- *
- * Fifo open "template". User fills the structure with fifo
- * attributes and passes it to vxge_hw_vpath_open().
- */
-struct vxge_hw_fifo_attr {
-
- enum vxge_hw_status (*callback)(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code,
- void *userdata,
- struct sk_buff ***skb_ptr,
- int nr_skb, int *more);
-
- void (*txdl_term)(
- void *txdlh,
- enum vxge_hw_txdl_state state,
- void *userdata);
-
- void *userdata;
- u32 per_txdl_space;
-};
-
-/**
- * struct vxge_hw_vpath_attr - Attributes of virtual path
- * @vp_id: Identifier of Virtual Path
- * @ring_attr: Attributes of ring for non-offload receive
- * @fifo_attr: Attributes of fifo for non-offload transmit
- *
- * Attributes of virtual path. This structure is passed as parameter
- * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
- */
-struct vxge_hw_vpath_attr {
- u32 vp_id;
- struct vxge_hw_ring_attr ring_attr;
- struct vxge_hw_fifo_attr fifo_attr;
-};
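
A sketch of the "open template" pattern: fill the per-ring and per-fifo
attributes and pass them to vxge_hw_vpath_open(). The my_* callback names
are placeholders for driver functions matching the prototypes in
struct vxge_hw_ring_attr and struct vxge_hw_fifo_attr:

static enum vxge_hw_status
example_vpath_open(struct __vxge_hw_device *devh, u32 vp_id, void *drv_ctx,
		   struct __vxge_hw_vpath_handle **vph)
{
	struct vxge_hw_vpath_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.vp_id = vp_id;

	attr.ring_attr.callback = my_rx_complete;
	attr.ring_attr.rxd_init = my_rxd_init;
	attr.ring_attr.rxd_term = my_rxd_term;
	attr.ring_attr.userdata = drv_ctx;

	attr.fifo_attr.callback  = my_tx_complete;
	attr.fifo_attr.txdl_term = my_txdl_term;
	attr.fifo_attr.userdata  = drv_ctx;

	return vxge_hw_vpath_open(devh, &attr, vph);
}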
-
-enum vxge_hw_status vxge_hw_device_hw_info_get(
- void __iomem *bar0,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status vxge_hw_device_config_default_get(
- struct vxge_hw_device_config *device_config);
-
-/**
- * vxge_hw_device_link_state_get - Get link state.
- * @devh: HW device handle.
- *
- * Get link state.
- * Returns: link state.
- */
-static inline
-enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
- struct __vxge_hw_device *devh)
-{
- return devh->link_state;
-}
-
-void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
-
-u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
-
-const u8 *
-vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_initialize(
- struct __vxge_hw_device **devh,
- struct vxge_hw_device_attr *attr,
- struct vxge_hw_device_config *device_config);
-
-enum vxge_hw_status vxge_hw_device_getpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 *tx,
- u32 *rx);
-
-enum vxge_hw_status vxge_hw_device_setpause_data(
- struct __vxge_hw_device *devh,
- u32 port,
- u32 tx,
- u32 rx);
-
-static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
- unsigned long size,
- struct pci_dev **p_dmah,
- struct pci_dev **p_dma_acch)
-{
- void *vaddr;
- unsigned long misaligned = 0;
- int realloc_flag = 0;
- *p_dma_acch = *p_dmah = NULL;
-
-realloc:
- vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
- if (vaddr == NULL)
- return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
- VXGE_CACHE_LINE_SIZE);
- if (realloc_flag)
- goto out;
-
- if (misaligned) {
- /* misaligned, free current one and try allocating
- * size + VXGE_CACHE_LINE_SIZE memory
- */
- kfree(vaddr);
- size += VXGE_CACHE_LINE_SIZE;
- realloc_flag = 1;
- goto realloc;
- }
-out:
- *(unsigned long *)p_dma_acch = misaligned;
- vaddr = (void *)((u8 *)vaddr + misaligned);
- return vaddr;
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
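
Usage sketch for the pair above: the "acc" handle returned by
vxge_os_dma_malloc() records the alignment offset and must be passed back
to vxge_os_dma_free(); the variable names are illustrative:

static void example_dma_block(struct pci_dev *pdev)
{
	struct pci_dev *dmah, *dma_acch;
	void *block;

	block = vxge_os_dma_malloc(pdev, 4096, &dmah, &dma_acch);
	if (!block)
		return;

	/* ... use the cache-line-aligned block ... */

	vxge_os_dma_free(pdev, block, &dma_acch);
}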
-
-/*
 - * __vxge_hw_mempool_item_priv - return a pointer to the per-item private space
- */
-static inline void*
-__vxge_hw_mempool_item_priv(
- struct vxge_hw_mempool *mempool,
- u32 memblock_idx,
- void *item,
- u32 *memblock_item_idx)
-{
- ptrdiff_t offset;
- void *memblock = mempool->memblocks_arr[memblock_idx];
-
-
- offset = (u32)((u8 *)item - (u8 *)memblock);
- vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
-
- (*memblock_item_idx) = (u32) offset / mempool->item_size;
- vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
-
- return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
- (*memblock_item_idx) * mempool->items_priv_size;
-}
-
-/*
- * __vxge_hw_fifo_txdl_priv - Return the per-TxDL HW-private data
- * associated with a TxD of the fifo.
- * @fifo: Fifo
- * @txdp: Pointer to a TxD
- */
-static inline struct __vxge_hw_fifo_txdl_priv *
-__vxge_hw_fifo_txdl_priv(
- struct __vxge_hw_fifo *fifo,
- struct vxge_hw_fifo_txd *txdp)
-{
- return (struct __vxge_hw_fifo_txdl_priv *)
- (((char *)((ulong)txdp->host_control)) +
- fifo->per_txdl_space);
-}
-
-enum vxge_hw_status vxge_hw_vpath_open(
- struct __vxge_hw_device *devh,
- struct vxge_hw_vpath_attr *attr,
- struct __vxge_hw_vpath_handle **vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_close(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
-
-enum vxge_hw_status
-vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_mtu_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 new_mtu);
-
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-
-static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
-{
- writel(val, addr + 4);
-}
-
-static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
-
-enum vxge_hw_status
-vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
-
-/**
- * vxge_debug_ll
- * @level: level of debug verbosity.
- * @mask: mask for the debug
- * @buf: Circular buffer for tracing
- * @fmt: printf like format string
- *
- * Provides logging facilities. Can be customized on a per-module
- * basis and/or with debug levels. Input parameters, except
- * module and level, are the same as for POSIX printf. This function
- * may be compiled out if the DEBUG macro was never defined.
- * See also: enum vxge_debug_level{}.
- */
-#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) do { \
- if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
- (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
- if ((mask & VXGE_DEBUG_MASK) == mask) \
- printk(fmt "\n", ##__VA_ARGS__); \
-} while (0)
-#else
-#define vxge_debug_ll(level, mask, fmt, ...)
-#endif
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
- struct __vxge_hw_vpath_handle **vpath_handles,
- u32 vpath_count,
- u8 *mtable,
- u8 *itable,
- u32 itable_size);
-
-enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- enum vxge_hw_rth_algoritms algorithm,
- struct vxge_hw_rth_hash_types *hash_type,
- u16 bucket_size);
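
A hedged example of enabling RTH on a vpath with the Jenkins hash over
TCP and plain IP flows (IPv4 and IPv6); the bucket size of 8 is an
arbitrary illustrative value:

static enum vxge_hw_status example_enable_rth(struct __vxge_hw_vpath_handle *vph)
{
	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en = 1,
		.hash_type_ipv4_en    = 1,
		.hash_type_tcpipv6_en = 1,
		.hash_type_ipv6_en    = 1,
	};

	return vxge_hw_vpath_rts_rth_set(vph, RTH_ALG_JENKINS,
					 &hash_types, 8);
}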
-
-enum vxge_hw_status
-__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
-
-#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
-#define VXGE_HW_MAX_POLLING_COUNT 100
-
-void
-vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
- u32 *minor, u32 *build);
-
-enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
- int size);
-
-enum vxge_hw_status
-vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
- struct eprom_image *eprom_image_data);
-
-int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
deleted file mode 100644
index 4d91026485ae..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ /dev/null
@@ -1,1154 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/ethtool.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-
-#include "vxge-ethtool.h"
-
-static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
- {"\n DRIVER STATISTICS"},
- {"vpaths_opened"},
- {"vpath_open_fail_cnt"},
- {"link_up_cnt"},
- {"link_down_cnt"},
- {"tx_frms"},
- {"tx_errors"},
- {"tx_bytes"},
- {"txd_not_free"},
- {"txd_out_of_desc"},
- {"rx_frms"},
- {"rx_errors"},
- {"rx_bytes"},
- {"rx_mcast"},
- {"pci_map_fail_cnt"},
- {"skb_alloc_fail_cnt"}
-};
-
-/**
- * vxge_ethtool_set_link_ksettings - Sets different link parameters.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool to set
- * link information.
- *
- * The function sets different link parameters provided by the user onto
- * the NIC.
- * Return value:
- * 0 on success.
- */
-static int
-vxge_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- /* We currently only support 10Gb/FULL */
- if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
- (cmd->base.speed != SPEED_10000) ||
- (cmd->base.duplex != DUPLEX_FULL))
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * vxge_ethtool_get_link_ksettings - Return link specific information.
- * @dev: device pointer.
- * @cmd: pointer to the structure with parameters given by ethtool
- * to return link information.
- *
- * Returns link specific information such as speed, duplex, etc. to ethtool.
- * Return value:
- * 0 on success.
- */
-static int vxge_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(dev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-/**
- * vxge_ethtool_gdrvinfo - Returns driver specific information.
- * @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool to
- * return driver information.
- *
- * Returns driver specific information such as name, version, etc. to ethtool.
- */
-static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
-}
-
-/**
- * vxge_ethtool_gregs - dumps the entire space of Titan into the buffer.
- * @dev: device pointer.
- * @regs: pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @space: Buffer into which all the registers are dumped.
- *
- * Dumps the vpath register space of Titan NIC into the user given
- * buffer area.
- */
-static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
-{
- int index, offset;
- enum vxge_hw_status status;
- u64 reg;
- u64 *reg_space = (u64 *)space;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
- regs->version = vdev->pdev->subsystem_device;
- for (index = 0; index < vdev->no_of_vpath; index++) {
- for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
- offset += 8) {
- status = vxge_hw_mgmt_reg_read(hldev,
- vxge_hw_mgmt_reg_type_vpath,
- vdev->vpaths[index].device_id,
- offset, &reg);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Getting reg dump Failed",
- __func__, __LINE__);
- return;
- }
- *reg_space++ = reg;
- }
- }
-}
-
-/**
- * vxge_ethtool_idnic - To physically identify the nic on the system.
- * @dev : device pointer.
- * @state : requested LED state
- *
- * Used to physically identify the NIC on the system.
- * Returns 0 on success.
- */
-static int vxge_ethtool_idnic(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vxge_ethtool_getpause_data - Pause frame generation and reception.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * Returns the Pause frame generation and reception capability of the NIC.
- * Return value:
- * void
- */
-static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
-}
-
-/**
- * vxge_ethtool_setpause_data - set/reset pause frame generation.
- * @dev : device pointer.
- * @ep : pointer to the structure with pause parameters given by ethtool.
- * Description:
- * It can be used to set or reset Pause frame generation or reception
- * support of the NIC.
- * Return value:
- * int, returns 0 on Success
- */
-static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
-
- vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
-
- vdev->config.tx_pause_enable = ep->tx_pause;
- vdev->config.rx_pause_enable = ep->rx_pause;
-
- return 0;
-}
-
-static void vxge_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *tmp_stats)
-{
- int j, k;
- enum vxge_hw_status status;
- enum vxge_hw_status swstatus;
- struct vxge_vpath *vpath = NULL;
- struct vxgedev *vdev = netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
- struct vxge_hw_xmac_stats *xmac_stats;
- struct vxge_hw_device_stats_sw_info *sw_stats;
- struct vxge_hw_device_stats_hw_info *hw_stats;
-
- u64 *ptr = tmp_stats;
-
- memset(tmp_stats, 0,
- vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
-
- xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
- if (xmac_stats == NULL) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for xmac_stats",
- __func__, __LINE__);
- return;
- }
-
- sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
- GFP_KERNEL);
- if (sw_stats == NULL) {
- kfree(xmac_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for sw_stats",
- __func__, __LINE__);
- return;
- }
-
- hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
- GFP_KERNEL);
- if (hw_stats == NULL) {
- kfree(xmac_stats);
- kfree(sw_stats);
- vxge_debug_init(VXGE_ERR,
- "%s : %d Memory Allocation failed for hw_stats",
- __func__, __LINE__);
- return;
- }
-
- *ptr++ = 0;
- status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
- if (status != VXGE_HW_OK) {
- if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting xmac stats",
- __func__, __LINE__);
- }
- }
- swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
- if (swstatus != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d Failure in getting sw stats",
- __func__, __LINE__);
- }
-
- status = vxge_hw_device_stats_get(hldev, hw_stats);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s : %d hw_stats_get error", __func__, __LINE__);
- }
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
- ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
- VXGE_HW_VPATH_RX_STATS_LEN);
- continue;
- }
-
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
- *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
- *ptr++ = vpath_info->tx_stats.tx_data_octets;
- *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
- *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
- *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip;
- *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
- *ptr++ = vpath_info->tx_stats.tx_icmp;
- *ptr++ = vpath_info->tx_stats.tx_tcp;
- *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
- *ptr++ = vpath_info->tx_stats.tx_udp;
- *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip;
- *ptr++ = vpath_info->tx_stats.tx_parse_error;
- *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
- *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_frms;
- *ptr++ = vpath_info->rx_stats.rx_offload_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
- *ptr++ = vpath_info->rx_stats.rx_data_octets;
- *ptr++ = vpath_info->rx_stats.rx_offload_octets;
- *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
- *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
- *ptr++ = vpath_info->rx_stats.rx_long_frms;
- *ptr++ = vpath_info->rx_stats.rx_usized_frms;
- *ptr++ = vpath_info->rx_stats.rx_osized_frms;
- *ptr++ = vpath_info->rx_stats.rx_frag_frms;
- *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
- *ptr++ = vpath_info->rx_stats.rx_ip;
- *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
- *ptr++ = vpath_info->rx_stats.rx_ip_octets;
- *ptr++ = vpath_info->rx_stats.rx_err_ip;
- *ptr++ = vpath_info->rx_stats.rx_icmp;
- *ptr++ = vpath_info->rx_stats.rx_tcp;
- *ptr++ = vpath_info->rx_stats.rx_udp;
- *ptr++ = vpath_info->rx_stats.rx_err_tcp;
- *ptr++ = vpath_info->rx_stats.rx_lost_frms;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip;
- *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
- *ptr++ = vpath_info->rx_stats.rx_various_discard;
- *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
- *ptr++ = vpath_info->rx_stats.rx_red_discard;
- *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
- *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
- *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
- }
- *ptr++ = 0;
- for (k = 0; k < vdev->max_config_port; k++) {
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].tx_icmp;
- *ptr++ = xmac_stats->port_stats[k].tx_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
- *ptr++ = xmac_stats->port_stats[k].tx_udp;
- *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
- *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
- *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
- *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
- *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_icmp;
- *ptr++ = xmac_stats->port_stats[k].rx_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
- *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
- *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
- *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
- *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
- *ptr++ = xmac_stats->port_stats[k].rx_jettison;
- *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_sw_info *vpath_info;
-
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
- &sw_stats->vpath_info[j];
- *ptr++ = vpath_info->soft_reset_cnt;
- *ptr++ = vpath_info->error_stats.unknown_alarms;
- *ptr++ = vpath_info->error_stats.network_sustained_fault;
- *ptr++ = vpath_info->error_stats.network_sustained_ok;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
- *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
- *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
- *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
- *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
- *ptr++ = vpath_info->error_stats.target_illegal_access;
- *ptr++ = vpath_info->error_stats.ini_serr_det;
- *ptr++ = vpath_info->error_stats.prc_ring_bumps;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
- *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
- *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
- *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
- *ptr++ = vpath_info->ring_stats.common_stats.
- reserve_free_swaps_cnt;
- *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
- *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
- *ptr++ = vpath_info->fifo_stats.common_stats.
- reserve_free_swaps_cnt;
- *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
- *ptr++ = vpath_info->fifo_stats.total_posts;
- *ptr++ = vpath_info->fifo_stats.total_buffers;
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
- }
-
- *ptr++ = 0;
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_hw_vpath_stats_hw_info *vpath_info;
- vpath = &vdev->vpaths[k];
- j = vpath->device_id;
- vpath_info = hw_stats->vpath_info[j];
- if (!vpath_info) {
- memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
- ptr += VXGE_HW_VPATH_STATS_LEN;
- continue;
- }
- *ptr++ = vpath_info->ini_num_mwr_sent;
- *ptr++ = vpath_info->ini_num_mrd_sent;
- *ptr++ = vpath_info->ini_num_cpl_rcvd;
- *ptr++ = vpath_info->ini_num_mwr_byte_sent;
- *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
- *ptr++ = vpath_info->wrcrdtarb_xoff;
- *ptr++ = vpath_info->rdcrdtarb_xoff;
- *ptr++ = vpath_info->vpath_genstats_count0;
- *ptr++ = vpath_info->vpath_genstats_count1;
- *ptr++ = vpath_info->vpath_genstats_count2;
- *ptr++ = vpath_info->vpath_genstats_count3;
- *ptr++ = vpath_info->vpath_genstats_count4;
- *ptr++ = vpath_info->vpath_genstats_count5;
- *ptr++ = vpath_info->prog_event_vnum0;
- *ptr++ = vpath_info->prog_event_vnum1;
- *ptr++ = vpath_info->prog_event_vnum2;
- *ptr++ = vpath_info->prog_event_vnum3;
- *ptr++ = vpath_info->rx_multi_cast_frame_discard;
- *ptr++ = vpath_info->rx_frm_transferred;
- *ptr++ = vpath_info->rxd_returned;
- *ptr++ = vpath_info->rx_mpa_len_fail_frms;
- *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
- *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
- *ptr++ = vpath_info->rx_permitted_frms;
- *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
- *ptr++ = vpath_info->rx_wol_frms;
- *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
- }
-
- *ptr++ = 0;
- *ptr++ = vdev->stats.vpaths_open;
- *ptr++ = vdev->stats.vpath_open_fail;
- *ptr++ = vdev->stats.link_up;
- *ptr++ = vdev->stats.link_down;
-
- for (k = 0; k < vdev->no_of_vpath; k++) {
- *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
- *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
- *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
- *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
- *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
- *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
- *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
- *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
- *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
- *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
- vdev->vpaths[k].ring.stats.pci_map_fail;
- *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
- }
-
- ptr += 12;
-
- kfree(xmac_stats);
- kfree(sw_stats);
- kfree(hw_stats);
-}
-
-static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
- u8 *data)
-{
- int stat_size = 0;
- int i, j;
- struct vxgedev *vdev = netdev_priv(dev);
- switch (stringset) {
- case ETH_SS_STATS:
- vxge_add_string("VPATH STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_proto_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_offload_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_retx_tcp_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_eth_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lost_ip_offload_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_various_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_sleep_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_queue_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frms_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discarded_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_errored_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->max_config_port; i++) {
- vxge_add_string("tx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_mcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_bcast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_ucast_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vld_ip_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_rst_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_parse_error_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_unknown_protocol_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_any_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_data_octects_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_offload_octects_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_mcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vld_bcast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tagged_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_long_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_usized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_osized_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_frag_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jabber_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_accepted_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_ip_octets_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_icmp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_tcp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_udp_%d\t\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_err_tcp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_count_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_discard_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_ip_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_drop_udp_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_fcs_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_switch_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_len_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rpa_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_rts_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_trash_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_buff_full_discard_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_red_discard_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column1_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_char2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_local_fault_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_xgmii_column2_match_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_jettison_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_remote_fault_%d\t\t\t",
- &stat_size, data, i);
- }
-
- vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("soft_reset_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("unknown_alarms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_fault_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("network_sustained_ok_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_pif_chain_error_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("statsb_drop_timeout_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("target_illegal_access_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_serr_det_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_ring_bumps_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prc_quanta_size_err_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("ring_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- vxge_add_string("fifo_full_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_cnt_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_usage_max_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_compl_cnt_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_posts_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("fifo_total_buffers_%d\t\t",
- &stat_size, data, i);
- for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
- vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
- &stat_size, data, j, i);
- }
-
- vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
- &stat_size, data, "");
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count0_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count1_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count2_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count3_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count4_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("vpath_genstats_count5_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum0_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum1_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum2_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("prog_event_vnum3_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_multi_cast_frame_discard_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_frm_transferred_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rxd_returned_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_permitted_frms_%d\t\t",
- &stat_size, data, i);
- vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- vxge_add_string("rx_wol_frms_%d\t\t\t",
- &stat_size, data, i);
- vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
- &stat_size, data, i);
- }
-
- memcpy(data + stat_size, &ethtool_driver_stats_keys,
- sizeof(ethtool_driver_stats_keys));
- }
-}
-
-static int vxge_ethtool_get_regs_len(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
-}
-
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (sset) {
- case ETH_SS_STATS:
- return VXGE_TITLE_LEN +
- (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
- (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
- (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
- DRIVER_STAT_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
- printk(KERN_INFO "Single Function Mode is required to flash the"
- " firmware\n");
- return -EINVAL;
- }
-
- if (netif_running(dev)) {
- printk(KERN_INFO "Interface %s must be down to flash the "
- "firmware\n", dev->name);
- return -EBUSY;
- }
-
- return vxge_fw_upgrade(vdev, parms->data, 1);
-}
-
-static const struct ethtool_ops vxge_ethtool_ops = {
- .get_drvinfo = vxge_ethtool_gdrvinfo,
- .get_regs_len = vxge_ethtool_get_regs_len,
- .get_regs = vxge_ethtool_gregs,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = vxge_ethtool_getpause_data,
- .set_pauseparam = vxge_ethtool_setpause_data,
- .get_strings = vxge_ethtool_get_strings,
- .set_phys_id = vxge_ethtool_idnic,
- .get_sset_count = vxge_ethtool_get_sset_count,
- .get_ethtool_stats = vxge_get_ethtool_stats,
- .flash_device = vxge_fw_flash,
- .get_link_ksettings = vxge_ethtool_get_link_ksettings,
- .set_link_ksettings = vxge_ethtool_set_link_ksettings,
-};
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev)
-{
- ndev->ethtool_ops = &vxge_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
deleted file mode 100644
index 065a2c0429a4..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef _VXGE_ETHTOOL_H
-#define _VXGE_ETHTOOL_H
-
-#include "vxge-main.h"
-
-/* Ethtool related variables and Macros. */
-static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
-
-#define VXGE_TITLE_LEN 5
-#define VXGE_HW_VPATH_STATS_LEN 27
-#define VXGE_HW_AGGR_STATS_LEN 13
-#define VXGE_HW_PORT_STATS_LEN 94
-#define VXGE_HW_VPATH_TX_STATS_LEN 19
-#define VXGE_HW_VPATH_RX_STATS_LEN 42
-#define VXGE_SW_STATS_LEN 60
-#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
- VXGE_HW_AGGR_STATS_LEN +\
- VXGE_HW_PORT_STATS_LEN +\
- VXGE_HW_VPATH_TX_STATS_LEN +\
- VXGE_HW_VPATH_RX_STATS_LEN)
-
-#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
-#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
-
-/* Maximum flicker time of adapter LED */
-#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
-#define VXGE_FLICKER_ON 1
-#define VXGE_FLICKER_OFF 0
-
-#define vxge_add_string(fmt, size, buf, ...) {\
- snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
- *size += ETH_GSTRING_LEN; \
-}
-
-#endif /*_VXGE_ETHTOOL_H*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
deleted file mode 100644
index fa5d4ddf429b..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ /dev/null
@@ -1,4808 +0,0 @@
-/******************************************************************************
-* This software may be used and distributed according to the terms of
-* the GNU General Public License (GPL), incorporated herein by reference.
-* Drivers based on or derived from this code fall under the GPL and must
-* retain the authorship, copyright and license notice. This file is not
-* a complete program and may only be used when the entire operating
-* system is licensed under the GPL.
-* See the file COPYING in this distribution for more information.
-*
-* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
-* Virtualized Server Adapter.
-* Copyright(c) 2002-2010 Exar Corp.
-*
-* The loadable module parameters supported by the driver, with a brief
-* explanation of each:
-* vlan_tag_strip:
-* Strip VLAN Tag enable/disable. Instructs the device to remove
-* the VLAN tag from all received tagged frames that are not
-* replicated at the internal L2 switch.
-* 0 - Do not strip the VLAN tag.
-* 1 - Strip the VLAN tag.
-*
-* addr_learn_en:
-* Enable learning the MAC address of the guest OS interface in
-* a virtualization environment.
-* 0 - DISABLE
-* 1 - ENABLE
-*
-* max_config_port:
-* Maximum number of ports to be supported.
-* MIN - 1 and MAX - 2
-*
-* max_config_vpath:
-* This configures the maximum number of VPATHs configured for each
-* device function.
-* MIN - 1 and MAX - 17
-*
-* max_config_dev:
-* This configures the maximum number of device functions to be enabled.
-* MIN - 1 and MAX - 17
-*
-******************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-#include <net/ip.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/net_tstamp.h>
-#include <linux/prefetch.h>
-#include <linux/module.h>
-#include "vxge-main.h"
-#include "vxge-reg.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
- "Virtualized Server Adapter");
-
-static const struct pci_device_id vxge_id_table[] = {
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
- PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
- PCI_ANY_ID},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, vxge_id_table);
-
-VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
-VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
-VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
-VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
-VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
-
-static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
-static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
- {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
-module_param_array(bw_percentage, uint, NULL, 0);
-
-static struct vxge_drv_config *driver_config;
-static void vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-static inline int is_vxge_card_up(struct vxgedev *vdev)
-{
- return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-}
-
-static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
-{
- struct sk_buff **skb_ptr = NULL;
- struct sk_buff **temp;
-#define NR_SKB_COMPLETED 16
- struct sk_buff *completed[NR_SKB_COMPLETED];
- int more;
-
- do {
- more = 0;
- skb_ptr = completed;
-
- if (__netif_tx_trylock(fifo->txq)) {
- vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
- NR_SKB_COMPLETED, &more);
- __netif_tx_unlock(fifo->txq);
- }
-
- /* free SKBs */
- for (temp = completed; temp != skb_ptr; temp++)
- dev_consume_skb_irq(*temp);
- } while (more);
-}
-
-static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
-{
- int i;
-
- /* Complete all transmits */
- for (i = 0; i < vdev->no_of_vpath; i++)
- VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
-}
-
-static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
-{
- int i;
- struct vxge_ring *ring;
-
- /* Complete all receives*/
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- vxge_hw_vpath_poll_rx(ring->handle);
- }
-}
-
-/*
- * vxge_callback_link_up
- *
- * This function is called during interrupt context to notify link up state
- * change.
- */
-static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
-
- netif_carrier_on(vdev->ndev);
- netif_tx_wake_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_callback_link_down
- *
- * This function is called during interrupt context to notify link down state
- * change.
- */
-static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- netdev_notice(vdev->ndev, "Link Down\n");
-
- vdev->stats.link_down++;
- netif_carrier_off(vdev->ndev);
- netif_tx_stop_all_queues(vdev->ndev);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_rx_alloc
- *
- * Allocate SKB.
- */
-static struct sk_buff *
-vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
-{
- struct net_device *dev;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
-
- dev = ring->ndev;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- /* try to allocate skb first. this one may fail */
- skb = netdev_alloc_skb(dev, skb_size +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb == NULL) {
- vxge_debug_mem(VXGE_ERR,
- "%s: out of memory to allocate SKB", dev->name);
- ring->stats.skb_alloc_fail++;
- return NULL;
- }
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d Skb : 0x%p", ring->ndev->name,
- __func__, __LINE__, skb);
-
- skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- rx_priv->skb = skb;
- rx_priv->skb_data = NULL;
- rx_priv->data_size = skb_size;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return skb;
-}
-
-/*
- * vxge_rx_map
- */
-static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
-{
- struct vxge_rx_priv *rx_priv;
- dma_addr_t dma_addr;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
-
- rx_priv->skb_data = rx_priv->skb->data;
- dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
- ring->stats.pci_map_fail++;
- return -EIO;
- }
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
- ring->ndev->name, __func__, __LINE__,
- (unsigned long long)dma_addr);
- vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
-
- rx_priv->data_dma = dma_addr;
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return 0;
-}
-
-/*
- * vxge_rx_initial_replenish
- * Allocation of RxD as an initial replenish procedure.
- */
-static enum vxge_hw_status
-vxge_rx_initial_replenish(void *dtrh, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (vxge_rx_alloc(dtrh, ring,
- VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
- return VXGE_HW_FAIL;
-
- if (vxge_rx_map(dtrh, ring)) {
- rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
- dev_kfree_skb(rx_priv->skb);
-
- return VXGE_HW_FAIL;
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-
- return VXGE_HW_OK;
-}
-
-static inline void
-vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
- int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
-{
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- skb_record_rx_queue(skb, ring->driver_id);
- skb->protocol = eth_type_trans(skb, ring->ndev);
-
- u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.rx_frms++;
- ring->stats.rx_bytes += pkt_length;
-
- if (skb->pkt_type == PACKET_MULTICAST)
- ring->stats.rx_mcast++;
- u64_stats_update_end(&ring->stats.syncp);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb protocol = %d",
- ring->ndev->name, __func__, __LINE__, skb->protocol);
-
- if (ext_info->vlan &&
- ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
- napi_gro_receive(ring->napi_p, skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
-}
-
-static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
- struct vxge_rx_priv *rx_priv)
-{
- dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
- vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
-}
-
-static inline void vxge_post(int *dtr_cnt, void **first_dtr,
- void *post_dtr, struct __vxge_hw_ring *ringh)
-{
- int dtr_count = *dtr_cnt;
- if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
- if (*first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
- *first_dtr = post_dtr;
- } else
- vxge_hw_ring_rxd_post_post(ringh, post_dtr);
- dtr_count++;
- *dtr_cnt = dtr_count;
-}
-
-/*
- * vxge_rx_1b_compl
- *
- * If the interrupt is because of a received frame or if the receive ring
- * contains fresh, as yet unprocessed frames, this function is called.
- */
-static enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
- unsigned int dma_sizes;
- void *first_dtr = NULL;
- int dtr_cnt = 0;
- int data_size;
- dma_addr_t data_dma;
- int pkt_length;
- struct sk_buff *skb;
- struct vxge_rx_priv *rx_priv;
- struct vxge_hw_ring_rxd_info ext_info;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
-
- if (ring->budget <= 0)
- goto out;
-
- do {
- prefetch((char *)dtr + L1_CACHE_BYTES);
- rx_priv = vxge_hw_ring_rxd_private_get(dtr);
- skb = rx_priv->skb;
- data_size = rx_priv->data_size;
- data_dma = rx_priv->data_dma;
- prefetch(rx_priv->skb_data);
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d skb = 0x%p",
- ring->ndev->name, __func__, __LINE__, skb);
-
- vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
- pkt_length = dma_sizes;
-
- pkt_length -= ETH_FCS_LEN;
-
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s:%d Packet Length = %d",
- ring->ndev->name, __func__, __LINE__, pkt_length);
-
- vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
-
- /* check skb validity */
- vxge_assert(skb);
-
- prefetch((char *)skb + L1_CACHE_BYTES);
- if (unlikely(t_code)) {
- if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
- VXGE_HW_OK) {
-
- ring->stats.rx_errors++;
- vxge_debug_rx(VXGE_TRACE,
- "%s: %s :%d Rx T_code is %d",
- ring->ndev->name, __func__,
- __LINE__, t_code);
-
- /* If the t_code is not supported and if the
- * t_code is other than 0x5 (unparseable packet
- * such as an unknown IPv6 header), drop it.
- */
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- continue;
- }
- }
-
- if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
- if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
- if (!vxge_rx_map(dtr, ring)) {
- skb_put(skb, pkt_length);
-
- dma_unmap_single(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_hw_ring_rxd_pre_post(ringh, dtr);
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- } else {
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb = skb;
- rx_priv->data_size = data_size;
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- ring->stats.rx_dropped++;
- break;
- }
- } else {
- struct sk_buff *skb_up;
-
- skb_up = netdev_alloc_skb(dev, pkt_length +
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
- if (skb_up != NULL) {
- skb_reserve(skb_up,
- VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
-
- dma_sync_single_for_cpu(&ring->pdev->dev,
- data_dma, data_size,
- DMA_FROM_DEVICE);
-
- vxge_debug_mem(VXGE_TRACE,
- "%s: %s:%d skb_up = %p",
- ring->ndev->name, __func__,
-					__LINE__, skb_up);
- memcpy(skb_up->data, skb->data, pkt_length);
-
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr,
- ringh);
-				/* pass the copied small SKB up the stack instead */
- skb = skb_up;
- skb_put(skb, pkt_length);
- } else {
- vxge_re_pre_post(dtr, ring, rx_priv);
-
- vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
- vxge_debug_rx(VXGE_ERR,
- "%s: vxge_rx_1b_compl: out of "
- "memory", dev->name);
- ring->stats.skb_alloc_fail++;
- break;
- }
- }
-
- if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
- !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
- (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
- ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
- ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
-
-
- if (ring->rx_hwts) {
- struct skb_shared_hwtstamps *skb_hwts;
- u32 ns = *(u32 *)(skb->head + pkt_length);
-
- skb_hwts = skb_hwtstamps(skb);
- skb_hwts->hwtstamp = ns_to_ktime(ns);
- }
-
-		/* rth_hash_type and rth_it_hit are non-zero regardless of
-		 * whether RSS is enabled. Only rth_value is zero when RSS is
-		 * disabled and non-zero when it is enabled, so key off of that.
- */
- if (ext_info.rth_value)
- skb_set_hash(skb, ext_info.rth_value,
- PKT_HASH_TYPE_L3);
-
- vxge_rx_complete(ring, skb, ext_info.vlan,
- pkt_length, &ext_info);
-
- ring->budget--;
- ring->pkts_processed++;
- if (!ring->budget)
- break;
-
- } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
- &t_code) == VXGE_HW_OK);
-
- if (first_dtr)
- vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...",
- __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/*
- * vxge_xmit_compl
- *
- * This function is called when an interrupt is raised to indicate DMA
- * completion of a Tx packet. It identifies the last TxD whose buffer was
- * freed and frees all skbs whose data have already been DMA'ed into the
- * NIC's internal memory.
- */
-static enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skb, int *more)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- struct sk_buff *skb, **done_skb = *skb_ptr;
- int pkt_cnt = 0;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Entered....", __func__, __LINE__);
-
- do {
- int frg_cnt;
- skb_frag_t *frag;
- int i = 0, j;
- struct vxge_tx_priv *txd_priv =
- vxge_hw_fifo_txdl_private_get(dtr);
-
- skb = txd_priv->skb;
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p "
- "tcode = 0x%x", fifo->ndev->name, __func__,
- __LINE__, fifo_hw, dtr, t_code);
- /* check skb validity */
- vxge_assert(skb);
- vxge_debug_tx(VXGE_TRACE,
-			"%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
- fifo->ndev->name, __func__, __LINE__,
- skb, txd_priv, frg_cnt);
- if (unlikely(t_code)) {
- fifo->stats.tx_errors++;
- vxge_debug_tx(VXGE_ERR,
- "%s: tx: dtr %p completed due to "
- "error t_code %01x", fifo->ndev->name,
- dtr, t_code);
- vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
- }
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev,
- txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-
- /* Updating the statistics block */
- u64_stats_update_begin(&fifo->stats.syncp);
- fifo->stats.tx_frms++;
- fifo->stats.tx_bytes += skb->len;
- u64_stats_update_end(&fifo->stats.syncp);
-
- *done_skb++ = skb;
-
- if (--nr_skb <= 0) {
- *more = 1;
- break;
- }
-
- pkt_cnt++;
- if (pkt_cnt > fifo->indicate_max_pkts)
- break;
-
- } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
- &dtr, &t_code) == VXGE_HW_OK);
-
- *skb_ptr = done_skb;
- if (netif_tx_queue_stopped(fifo->txq))
- netif_tx_wake_queue(fifo->txq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- fifo->ndev->name, __func__, __LINE__);
- return VXGE_HW_OK;
-}
-
-/* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
-{
- u16 queue_len, counter = 0;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip;
- struct tcphdr *th;
-
- ip = ip_hdr(skb);
-
- if (!ip_is_fragment(ip)) {
- th = (struct tcphdr *)(((unsigned char *)ip) +
- ip->ihl*4);
-
- queue_len = vdev->no_of_vpath;
- counter = (ntohs(th->source) +
- ntohs(th->dest)) &
- vdev->vpath_selector[queue_len - 1];
- if (counter >= queue_len)
- counter = queue_len - 1;
- }
- }
- return counter;
-}
-
-static enum vxge_hw_status vxge_search_mac_addr_in_list(
- struct vxge_vpath *vpath, u64 del_mac)
-{
- struct list_head *entry, *next;
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
- return TRUE;
- }
- return FALSE;
-}
-
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct vxge_mac_addrs *new_mac_entry;
- u8 *mac_address = NULL;
-
- if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
- return TRUE;
-
- new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
- if (!new_mac_entry) {
- vxge_debug_mem(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- return FALSE;
- }
-
- list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
- /* Copy the new mac address to the list */
- mac_address = (u8 *)&new_mac_entry->macaddr;
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- new_mac_entry->state = mac->state;
- vpath->mac_addr_cnt++;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt++;
-
- return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status
-vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
- if (is_multicast_ether_addr(mac->macaddr))
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
- else
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
- mac->macmask, duplicate_mode);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config add entry failed for vpath:%d",
- vpath->device_id);
- } else
- if (FALSE == vxge_mac_list_add(vpath, mac))
- status = -EPERM;
-
- return status;
-}
-
-static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
-{
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- u64 mac_addr = 0, vpath_vector = 0;
- int vpath_idx = 0;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = NULL;
-
- mac_address = (u8 *)&mac_addr;
- memcpy(mac_address, mac_header, ETH_ALEN);
-
- /* Is this mac address already in the list? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vxge_search_mac_addr_in_list(vpath, mac_addr))
- return vpath_idx;
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
-
- /* Any vpath has room to add mac address to its da table? */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK)
- return -EPERM;
- return vpath_idx;
- }
- }
-
- mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
- vpath_idx = 0;
- mac_info.vpath_no = vpath_idx;
- /* Is the first vpath already selected as catch-basin ? */
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
- /* Add this mac address to this vpath */
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
- return vpath_idx;
- }
-
- /* Select first vpath as catch-basin */
- vpath_vector = vxge_mBIT(vpath->device_id);
- status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- vpath_vector);
- if (status != VXGE_HW_OK) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Unable to set the vpath-%d in catch-basin mode",
- VXGE_DRIVER_NAME, vpath->device_id);
- return -EPERM;
- }
-
- if (FALSE == vxge_mac_list_add(vpath, &mac_info))
- return -EPERM;
-
- return vpath_idx;
-}
-
-/**
- * vxge_xmit
- * @skb : the socket buffer containing the Tx data.
- * @dev : device pointer.
- *
- * This function is the Tx entry point of the driver. The Neterion NIC supports
- * certain protocol-assist features on the Tx side, namely CSO, S/G and LSO.
-*/
-static netdev_tx_t
-vxge_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct vxge_fifo *fifo = NULL;
- void *dtr_priv;
- void *dtr = NULL;
- struct vxgedev *vdev = NULL;
- enum vxge_hw_status status;
- int frg_cnt, first_frg_len;
- skb_frag_t *frag;
- int i = 0, j = 0, avail;
- u64 dma_pointer;
- struct vxge_tx_priv *txdl_priv = NULL;
- struct __vxge_hw_fifo *fifo_hw;
- int offload_type;
- int vpath_no = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- /* A buffer with no data will be dropped */
- if (unlikely(skb->len <= 0)) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Buffer has no data..", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- vxge_debug_tx(VXGE_ERR,
- "%s: vdev not initialized", dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (vdev->config.addr_learn_en) {
- vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
- if (vpath_no == -EPERM) {
- vxge_debug_tx(VXGE_ERR,
- "%s: Failed to store the mac address",
- dev->name);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-
- if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
- vpath_no = skb_get_queue_mapping(skb);
- else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
- vpath_no = vxge_get_vpath_no(vdev, skb);
-
- vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
-
- if (vpath_no >= vdev->no_of_vpath)
- vpath_no = 0;
-
- fifo = &vdev->vpaths[vpath_no].fifo;
- fifo_hw = fifo->handle;
-
- if (netif_tx_queue_stopped(fifo->txq))
- return NETDEV_TX_BUSY;
-
- avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
- if (avail == 0) {
- vxge_debug_tx(VXGE_ERR,
- "%s: No free TXDs available", dev->name);
- fifo->stats.txd_not_free++;
- goto _exit0;
- }
-
- /* Last TXD? Stop tx queue to avoid dropping packets. TX
- * completion will resume the queue.
- */
- if (avail == 1)
- netif_tx_stop_queue(fifo->txq);
-
- status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
- if (unlikely(status != VXGE_HW_OK)) {
- vxge_debug_tx(VXGE_ERR,
-			"%s: Out of descriptors.", dev->name);
- fifo->stats.txd_out_of_desc++;
- goto _exit0;
- }
-
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
- dev->name, __func__, __LINE__,
- fifo_hw, dtr, dtr_priv);
-
- if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag = skb_vlan_tag_get(skb);
- vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
- }
-
- first_frg_len = skb_headlen(skb);
-
- dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
- first_frg_len, DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
- fifo->stats.pci_map_fail++;
- goto _exit0;
- }
-
- txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
- txdl_priv->skb = skb;
- txdl_priv->dma_buffers[j] = dma_pointer;
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d skb = %p txdl_priv = %p "
- "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
- __func__, __LINE__, skb, txdl_priv,
- frg_cnt, (unsigned long long)dma_pointer);
-
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- first_frg_len);
-
- frag = &skb_shinfo(skb)->frags[0];
- for (i = 0; i < frg_cnt; i++) {
- /* ignore 0 length fragment */
- if (!skb_frag_size(frag))
- continue;
-
- dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
- goto _exit2;
- vxge_debug_tx(VXGE_TRACE,
- "%s: %s:%d frag = %d dma_pointer = 0x%llx",
- dev->name, __func__, __LINE__, i,
- (unsigned long long)dma_pointer);
-
- txdl_priv->dma_buffers[j] = dma_pointer;
- vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- skb_frag_size(frag));
- frag += 1;
- }
-
- offload_type = vxge_offload_type(skb);
-
- if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- int mss = vxge_tcp_mss(skb);
- if (mss) {
- vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
- dev->name, __func__, __LINE__, mss);
- vxge_hw_fifo_txdl_mss_set(dtr, mss);
- } else {
- vxge_assert(skb->len <=
- dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
- vxge_assert(0);
- goto _exit1;
- }
- }
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vxge_hw_fifo_txdl_cksum_set_bits(dtr,
- VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
- VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
-
- vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return NETDEV_TX_OK;
-
-_exit2:
- vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-_exit1:
- j = 0;
- frag = &skb_shinfo(skb)->frags[0];
-
- dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (; j < i; j++) {
- dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit0:
- netif_tx_stop_queue(fifo->txq);
- dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
-}
-
-/*
- * vxge_rx_term
- *
- * This function is called by the HW layer to abort all outstanding receive
- * descriptors.
- */
-static void
-vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
-{
- struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct vxge_rx_priv *rx_priv =
- vxge_hw_ring_rxd_private_get(dtrh);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- ring->ndev->name, __func__, __LINE__);
- if (state != VXGE_HW_RXD_STATE_POSTED)
- return;
-
- dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
- rx_priv->data_size, DMA_FROM_DEVICE);
-
- dev_kfree_skb(rx_priv->skb);
- rx_priv->skb_data = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- ring->ndev->name, __func__, __LINE__);
-}
-
-/*
- * vxge_tx_term
- *
- * This function is called to abort all outstanding Tx descriptors.
- */
-static void
-vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
- skb_frag_t *frag;
- int i = 0, j, frg_cnt;
- struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
- struct sk_buff *skb = txd_priv->skb;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (state != VXGE_HW_TXDL_STATE_POSTED)
- return;
-
- /* check skb validity */
- vxge_assert(skb);
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
-
- /* for unfragmented skb */
- dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_headlen(skb), DMA_TO_DEVICE);
-
- for (j = 0; j < frg_cnt; j++) {
- dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
- skb_frag_size(frag), DMA_TO_DEVICE);
- frag += 1;
- }
-
- dev_kfree_skb(skb);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct list_head *entry, *next;
- u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
-
- /* Copy the mac address to delete from the list */
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
- list_del(entry);
- kfree(entry);
- vpath->mac_addr_cnt--;
-
- if (is_multicast_ether_addr(mac->macaddr))
- vpath->mcast_addr_cnt--;
- return TRUE;
- }
- }
-
- return FALSE;
-}
-
-/* delete a mac address from DA table */
-static enum vxge_hw_status
-vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
- mac->macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config delete entry failed for vpath:%d",
- vpath->device_id);
- } else
- vxge_mac_list_del(vpath, mac);
- return status;
-}
-
-/**
- * vxge_set_multicast
- * @dev: pointer to the device structure
- *
- * Entry point for multicast address enable/disable
- * This function is a driver entry point which the kernel calls whenever
- * multicast addresses must be enabled/disabled. It is also called to
- * set/reset promiscuous mode. Depending on the device flags, we determine
- * whether multicast addresses must be enabled or promiscuous mode is to be
- * disabled, etc.
- */
-static void vxge_set_multicast(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- struct vxgedev *vdev;
- int i, mcast_cnt = 0;
- struct vxge_vpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- int vpath_idx = 0;
- struct vxge_mac_addrs *mac_entry;
- struct list_head *list_head;
- struct list_head *entry, *next;
- u8 *mac_address = NULL;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return;
-
- if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to enable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 1;
- }
- } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
- status = vxge_hw_vpath_mcast_disable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to disable "
- "multicast, status %d", status);
- vdev->all_multi_flg = 0;
- }
- }
-
-
- if (!vdev->config.addr_learn_en) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- if (dev->flags & IFF_PROMISC)
- status = vxge_hw_vpath_promisc_enable(
- vpath->handle);
- else
- status = vxge_hw_vpath_promisc_disable(
- vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR, "failed to %s promisc"
- ", status %d", dev->flags&IFF_PROMISC ?
- "enable" : "disable", status);
- }
- }
-
- memset(&mac_info, 0, sizeof(struct macInfo));
- /* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- list_head = &vdev->vpaths[0].mac_addr_list;
- if ((netdev_mc_count(dev) +
- (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
- vdev->vpaths[0].max_mac_addr_cnt)
- goto _set_all_mcast;
-
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr)) {
- for (vpath_idx = 0; vpath_idx <
- vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(
- vdev,
- &mac_info);
- }
- }
- }
- }
-
- /* Add new ones */
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
-						"%s:%d Setting individual "
-						"multicast address failed",
- __func__, __LINE__);
- goto _set_all_mcast;
- }
- }
- }
-
- return;
-_set_all_mcast:
- mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
- /* Delete previous MC's */
- for (i = 0; i < mcast_cnt; i++) {
- list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *)entry;
- /* Copy the mac address to delete */
- mac_address = (u8 *)&mac_entry->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-
- if (is_multicast_ether_addr(mac_info.macaddr))
- break;
- }
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
- vpath_idx++) {
- mac_info.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info);
- }
- }
-
- /* Enable all multicast */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_open);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling all multicasts failed",
- __func__, __LINE__);
- }
- vdev->all_multi_flg = 1;
- }
- dev->flags |= IFF_ALLMULTI;
- }
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_set_mac_addr
- * @dev: pointer to the device structure
- * @p: pointer to a sockaddr structure holding the new MAC address
- *
- * Update entry "0" (default MAC addr)
- */
-static int vxge_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct vxgedev *vdev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info_new, mac_info_old;
- int vpath_idx = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memset(&mac_info_new, 0, sizeof(struct macInfo));
- memset(&mac_info_old, 0, sizeof(struct macInfo));
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
- __func__, __LINE__);
-
- /* Get the old address */
- memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
-
- /* Copy the new address */
- memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
-
-	/* First delete the old mac address from all the vpaths,
-	   as we can't specify the index while adding a new mac address */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
- if (!vpath->is_open) {
-			/* This can happen when this interface is added to or
-			   removed from the bonding interface. Delete this
-			   station address from the linked list */
- vxge_mac_list_del(vpath, &mac_info_old);
-
- /* Add this new address to the linked list
- for later restoring */
- vxge_mac_list_add(vpath, &mac_info_new);
-
- continue;
- }
- /* Delete the station address */
- mac_info_old.vpath_no = vpath_idx;
- status = vxge_del_mac_addr(vdev, &mac_info_old);
- }
-
- if (unlikely(!is_vxge_card_up(vdev))) {
- eth_hw_addr_set(dev, addr->sa_data);
- return VXGE_HW_OK;
- }
-
- /* Set this mac address to all the vpaths */
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- mac_info_new.vpath_no = vpath_idx;
- mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- status = vxge_add_mac_addr(vdev, &mac_info_new);
- if (status != VXGE_HW_OK)
- return -EINVAL;
- }
-
- eth_hw_addr_set(dev, addr->sa_data);
-
- return status;
-}
-
-/*
- * vxge_vpath_intr_enable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to enable the interrupts
- *
- * Enables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id = 0;
- int tim_msix_id[4] = {0, 1, 0, 0};
- int alarm_msix_id = VXGE_ALARM_MSIX_ID;
-
- vxge_hw_vpath_intr_enable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
- else {
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- alarm_msix_id);
-
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
-
- /* enable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
- vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
- }
-}
-
-/*
- * vxge_vpath_intr_disable
- * @vdev: pointer to vdev
- * @vp_id: vpath for which to disable the interrupts
- *
- * Disables the interrupts for the vpath
-*/
-static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
-{
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- struct __vxge_hw_device *hldev;
- int msix_id;
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
-
- vxge_hw_vpath_intr_disable(vpath->handle);
-
- if (vdev->config.intr_type == INTA)
- vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
- else {
- msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
-
- /* disable the alarm vector */
- msix_id = (vpath->handle->vpath->hldev->first_vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
- }
-}
-
-/* list all mac addresses from DA table */
-static enum vxge_hw_status
-vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- unsigned char macmask[ETH_ALEN];
- unsigned char macaddr[ETH_ALEN];
-
- status = vxge_hw_vpath_mac_addr_get(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config list entry failed for vpath:%d",
- vpath->device_id);
- return status;
- }
-
- while (!ether_addr_equal(mac->macaddr, macaddr)) {
- status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK)
- break;
- }
-
- return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- struct list_head *entry, *next;
-
- memset(&mac_info, 0, sizeof(struct macInfo));
-
- if (vpath->is_open) {
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- mac_address =
- (u8 *)&
- ((struct vxge_mac_addrs *)entry)->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- ((struct vxge_mac_addrs *)entry)->state =
- VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- /* does this mac address already exist in da table? */
- status = vxge_search_mac_addr_in_da_table(vpath,
- &mac_info);
- if (status != VXGE_HW_OK) {
- /* Add this mac address to the DA table */
- status = vxge_hw_vpath_mac_addr_add(
- vpath->handle, mac_info.macaddr,
- mac_info.macmask,
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA add entry failed for vpath:%d",
- vpath->device_id);
- ((struct vxge_mac_addrs *)entry)->state
- = VXGE_LL_MAC_ADDR_IN_LIST;
- }
- }
- }
- }
-
- return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status
-vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev = vpath->vdev;
- u16 vid;
-
- if (!vpath->is_open)
- return status;
-
- for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-
- return status;
-}
-
-/*
- * vxge_reset_vpath
- * @vdev: pointer to vdev
- * @vp_id: vpath to reset
- *
- * Resets the vpath
-*/
-static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int ret = 0;
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is device reset already scheduled */
- if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
-
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(vpath->handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
-					"vxge_hw_vpath_recover_from_reset "
-					"failed for vpath:%d", vp_id);
- return status;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
-				"vxge_hw_vpath_reset failed for "
-				"vpath:%d", vp_id);
- return status;
- }
- } else
- return VXGE_HW_FAIL;
-
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- /* Enable all broadcast */
- vxge_hw_vpath_bcast_enable(vpath->handle);
-
- /* Enable all multicast */
- if (vdev->all_multi_flg) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
-
- /* Enable the interrupts */
- vxge_vpath_intr_enable(vdev, vp_id);
-
- smp_wmb();
-
- /* Enable the flow of traffic through the vpath */
- vxge_hw_vpath_enable(vpath->handle);
-
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- vpath->ring.last_status = VXGE_HW_OK;
-
- /* Vpath reset done */
- clear_bit(vp_id, &vdev->vp_reset);
-
- /* Start the vpath queue */
- if (netif_tx_queue_stopped(vpath->fifo.txq))
- netif_tx_wake_queue(vpath->fifo.txq);
-
- return ret;
-}
-
-/* Configure CI */
-static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
-{
- int i = 0;
-
- /* Enable CI for RTI */
- if (vdev->config.intr_type == MSI_X) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_ring *hw_ring;
-
- hw_ring = vdev->vpaths[i].ring.handle;
- vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
- }
- }
-
- /* Enable CI for TTI */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
- vxge_hw_vpath_tti_ci_set(hw_fifo);
- /*
- * For Inta (with or without napi), Set CI ON for only one
- * vpath. (Have only one free running timer).
- */
- if ((vdev->config.intr_type == INTA) && (i == 0))
- break;
- }
-
- return;
-}
-
-static int do_vxge_reset(struct vxgedev *vdev, int event)
-{
- int ret = 0, vp_id, i;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* is reset already scheduled */
- if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- return 0;
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- netif_carrier_off(vdev->ndev);
-
- /* wait for all the vpath reset to complete */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- while (test_bit(vp_id, &vdev->vp_reset))
- msleep(50);
- }
-
- netif_carrier_on(vdev->ndev);
-
- /* if execution mode is set to debug, don't reset the adapter */
- if (unlikely(vdev->exec_mode)) {
- vxge_debug_init(VXGE_ERR,
- "%s: execution mode is debug, returning..",
- vdev->ndev->name);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- netif_tx_stop_all_queues(vdev->ndev);
- return 0;
- }
- }
-
- if (event == VXGE_LL_FULL_RESET) {
- vxge_hw_device_wait_receive_idle(vdev->devh);
- vxge_hw_device_intr_disable(vdev->devh);
-
- switch (vdev->cric_err_event) {
- case VXGE_HW_EVENT_UNKNOWN:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "unknown error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_RESET_START:
- break;
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- case VXGE_HW_EVENT_ALARM_CLEARED:
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- break;
- case VXGE_HW_EVENT_CRITICAL_ERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "serious error",
- vdev->ndev->name);
- /* SOP or device reset required */
- /* This event is not currently used */
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SERR:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "serious error",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- ret = -EPERM;
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- netif_tx_stop_all_queues(vdev->ndev);
- vxge_debug_init(VXGE_ERR,
-				"fatal: %s: Disabling device due to "
- "slot freeze",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- default:
- break;
-
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
- netif_tx_stop_all_queues(vdev->ndev);
-
- if (event == VXGE_LL_FULL_RESET) {
- vxge_reset_all_vpaths(vdev);
- }
-
- if (event == VXGE_LL_COMPL_RESET) {
- for (i = 0; i < vdev->no_of_vpath; i++)
- if (vdev->vpaths[i].handle) {
- if (vxge_hw_vpath_recover_from_reset(
- vdev->vpaths[i].handle)
- != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- ret = -EPERM;
- goto out;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- ret = -EPERM;
- goto out;
- }
- }
-
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
- /* Reprogram the DA table with populated mac addresses */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
- vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
- }
-
- /* enable vpath interrupts */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_vpath_intr_enable(vdev, i);
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- /* Indicate card up */
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Get the traffic to flow through the vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_enable(vdev->vpaths[i].handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
- }
-
- netif_tx_wake_all_queues(vdev->ndev);
- }
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
-out:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-
- /* Indicate reset done */
- if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
- return ret;
-}
-
-/*
- * vxge_reset
- * @vdev: pointer to ll device
- *
- * The driver may reset the chip on events such as SERR, ECC errors, etc.
- */
-static void vxge_reset(struct work_struct *work)
-{
- struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
-
- if (!netif_running(vdev->ndev))
- return;
-
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-}
-
-/**
- * vxge_poll_msix - Receive handler when Receive Polling is used.
- * @napi: pointer to the napi structure.
- * @budget: Number of packets budgeted to be processed in this iteration.
- *
- * This function comes into the picture only if the receive side is handled
- * through polling (called NAPI in Linux). It does most of what the normal
- * Rx interrupt handler does in terms of descriptor and packet processing,
- * but not in interrupt context. It also processes at most a specified number
- * of packets in one iteration. This value is passed down by the kernel as
- * the function argument 'budget'.
- */
-static int vxge_poll_msix(struct napi_struct *napi, int budget)
-{
- struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
- int pkts_processed;
- int budget_org = budget;
-
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed = ring->pkts_processed;
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
-
- /* Re enable the Rx interrupts for the vpath */
- vxge_hw_channel_msix_unmask(
- (struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
- }
-
-	/* Return the local copy of the packet count in case the interrupt
-	 * fires again right after being unmasked above and preempts this
-	 * NAPI thread */
- return pkts_processed;
-}
-
-static int vxge_poll_inta(struct napi_struct *napi, int budget)
-{
- struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
- int pkts_processed = 0;
- int i;
- int budget_org = budget;
- struct vxge_ring *ring;
-
- struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
- ring->budget = budget;
- ring->pkts_processed = 0;
- vxge_hw_vpath_poll_rx(ring->handle);
- pkts_processed += ring->pkts_processed;
- budget -= ring->pkts_processed;
- if (budget <= 0)
- break;
- }
-
- VXGE_COMPLETE_ALL_TX(vdev);
-
- if (pkts_processed < budget_org) {
- napi_complete_done(napi, pkts_processed);
- /* Re enable the Rx interrupts for the ring */
- vxge_hw_device_unmask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- }
-
- return pkts_processed;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * vxge_netpoll - netpoll event handler entry point
- * @dev : pointer to the device structure.
- * Description:
- * This function is called by the upper layer to check for events on the
- * interface in situations where interrupts are disabled. It is used for
- * specific in-kernel networking tasks, such as remote consoles and kernel
- * debugging over the network (for example, netdump in Red Hat).
- */
-static void vxge_netpoll(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct pci_dev *pdev = vdev->pdev;
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- const int irq = pdev->irq;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- if (pci_channel_offline(pdev))
- return;
-
- disable_irq(irq);
- vxge_hw_device_clear_tx_rx(hldev);
-
- vxge_hw_device_clear_tx_rx(hldev);
- VXGE_COMPLETE_ALL_RX(vdev);
- VXGE_COMPLETE_ALL_TX(vdev);
-
- enable_irq(irq);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-#endif
-
-/* RTH configuration */
-static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_rth_hash_types hash_types;
- u8 itable[256] = {0}; /* indirection table */
- u8 mtable[256] = {0}; /* CPU to vpath mapping */
- int index;
-
- /*
- * Filling
- * - itable with bucket numbers
- * - mtable with bucket-to-vpath mapping
- */
- for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
- itable[index] = index;
- mtable[index] = index % vdev->no_of_vpath;
- }
-
- /* set indirection table, bucket-to-vpath mapping */
- status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
- vdev->no_of_vpath,
- mtable, itable,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH indirection table configuration failed "
- "for vpath:%d", vdev->vpaths[0].device_id);
- return status;
- }
-
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
- /*
-	 * Because the itable_set() method uses the active_table field
-	 * for the target virtual path, the RTH config should be updated
-	 * for all VPATHs. The h/w only uses the lowest-numbered VPATH
- * when steering frames.
- */
- for (index = 0; index < vdev->no_of_vpath; index++) {
- status = vxge_hw_vpath_rts_rth_set(
- vdev->vpaths[index].handle,
- vdev->config.rth_algorithm,
- &hash_types,
- vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "RTH configuration failed for vpath:%d",
- vdev->vpaths[index].device_id);
- return status;
- }
- }
-
- return status;
-}
-
-/* reset vpaths */
-static void vxge_reset_all_vpaths(struct vxgedev *vdev)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- if (vpath->handle) {
- if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
- if (is_vxge_card_up(vdev) &&
- vxge_hw_vpath_recover_from_reset(
- vpath->handle) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_recover_"
- "from_reset failed for vpath: "
- "%d", i);
- return;
- }
- } else {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_reset failed for "
- "vpath:%d", i);
- return;
- }
- }
- }
-}
-
-/* close vpaths */
-static void vxge_close_vpaths(struct vxgedev *vdev, int index)
-{
- struct vxge_vpath *vpath;
- int i;
-
- for (i = index; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- if (vpath->handle && vpath->is_open) {
- vxge_hw_vpath_close(vpath->handle);
- vdev->stats.vpaths_open--;
- }
- vpath->is_open = 0;
- vpath->handle = NULL;
- }
-}
-
-/* open vpaths */
-static int vxge_open_vpaths(struct vxgedev *vdev)
-{
- struct vxge_hw_vpath_attr attr;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath;
- u32 vp_id = 0;
- int i;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_assert(vpath->is_configured);
-
- if (!vdev->titan1) {
- struct vxge_hw_vp_config *vcfg;
- vcfg = &vdev->devh->config.vp_config[vpath->device_id];
-
- vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
- vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
- vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
- vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
- vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
- vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
- vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
- vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
- vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
- }
-
- attr.vp_id = vpath->device_id;
- attr.fifo_attr.callback = vxge_xmit_compl;
- attr.fifo_attr.txdl_term = vxge_tx_term;
- attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
- attr.fifo_attr.userdata = &vpath->fifo;
-
- attr.ring_attr.callback = vxge_rx_1b_compl;
- attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
- attr.ring_attr.rxd_term = vxge_rx_term;
- attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
- attr.ring_attr.userdata = &vpath->ring;
-
- vpath->ring.ndev = vdev->ndev;
- vpath->ring.pdev = vdev->pdev;
-
- status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
- if (status == VXGE_HW_OK) {
- vpath->fifo.handle =
- (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
- vpath->ring.handle =
- (struct __vxge_hw_ring *)attr.ring_attr.userdata;
- vpath->fifo.tx_steering_type =
- vdev->config.tx_steering_type;
- vpath->fifo.ndev = vdev->ndev;
- vpath->fifo.pdev = vdev->pdev;
-
- u64_stats_init(&vpath->fifo.stats.syncp);
- u64_stats_init(&vpath->ring.stats.syncp);
-
- if (vdev->config.tx_steering_type)
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, i);
- else
- vpath->fifo.txq =
- netdev_get_tx_queue(vdev->ndev, 0);
- vpath->fifo.indicate_max_pkts =
- vdev->config.fifo_indicate_max_pkts;
- vpath->fifo.tx_vector_no = 0;
- vpath->ring.rx_vector_no = 0;
- vpath->ring.rx_hwts = vdev->rx_hwts;
- vpath->is_open = 1;
- vdev->vp_handles[i] = vpath->handle;
- vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
- vdev->stats.vpaths_open++;
- } else {
- vdev->stats.vpath_open_fail++;
- vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
- "open with status: %d",
- vdev->ndev->name, vpath->device_id,
- status);
- vxge_close_vpaths(vdev, 0);
- return -EPERM;
- }
-
- vp_id = vpath->handle->vpath->vp_id;
- vdev->vpaths_deployed |= vxge_mBIT(vp_id);
- }
-
- return VXGE_HW_OK;
-}
-
-/**
- * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @fifo: pointer to transmit fifo structure
- * Description: The function changes the boundary timer and restriction timer
- * values depending on the traffic
- * Return Value: None
- */
-static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
-{
- fifo->interrupt_count++;
- if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_fifo *hw_fifo = fifo->handle;
-
- fifo->jiffies = jiffies;
- if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
- hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
- hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- } else if (hw_fifo->rtimer != 0) {
- hw_fifo->rtimer = 0;
- vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
- }
- fifo->interrupt_count = 0;
- }
-}
-
-/**
- * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
- * if the interrupts are not within a range
- * @ring: pointer to receive ring structure
- * Description: The function increases or decreases the packet counts within
- * the ranges of traffic utilization, if the interrupts due to this ring are
- * not within a fixed range.
- * Return Value: Nothing
- */
-static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
-{
- ring->interrupt_count++;
- if (time_before(ring->jiffies + HZ / 100, jiffies)) {
- struct __vxge_hw_ring *hw_ring = ring->handle;
-
- ring->jiffies = jiffies;
- if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
- hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
- hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- } else if (hw_ring->rtimer != 0) {
- hw_ring->rtimer = 0;
- vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
- }
- ring->interrupt_count = 0;
- }
-}
-
-/*
- * vxge_isr_napi
- * @irq: the irq of the device.
- * @dev_id: a void pointer to the hldev structure of the Titan device
- * @ptregs: pointer to the registers pushed on the stack.
- *
- * This function is the ISR of the device when NAPI is enabled. It
- * identifies the reason for the interrupt and calls the relevant service
- * routines.
- */
-static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
-{
- struct __vxge_hw_device *hldev;
- u64 reason;
- enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *)dev_id;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (pci_channel_offline(vdev->pdev))
- return IRQ_NONE;
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_HANDLED;
-
- status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
- if (status == VXGE_HW_OK) {
- vxge_hw_device_mask_all(hldev);
-
- if (reason &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
- vdev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
-
- vxge_hw_device_clear_tx_rx(hldev);
- napi_schedule(&vdev->napi);
- vxge_debug_intr(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_HANDLED;
- } else
- vxge_hw_device_unmask_all(hldev);
- } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
- (status == VXGE_HW_ERR_CRITICAL) ||
- (status == VXGE_HW_ERR_FIFO))) {
- vxge_hw_device_mask_all(hldev);
- vxge_hw_device_flush_io(hldev);
- return IRQ_HANDLED;
- } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
- return IRQ_HANDLED;
-
- vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
- return IRQ_NONE;
-}
-
-static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
-{
- struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
-
- adaptive_coalesce_tx_interrupts(fifo);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- VXGE_COMPLETE_VPATH_TX(fifo);
-
- vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
- fifo->tx_vector_no);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
-{
- struct vxge_ring *ring = (struct vxge_ring *)dev_id;
-
- adaptive_coalesce_rx_interrupts(ring);
-
- vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
-
- napi_schedule(&ring->napi);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-vxge_alarm_msix_handle(int irq, void *dev_id)
-{
- int i;
- enum vxge_hw_status status;
- struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
- struct vxgedev *vdev = vpath->vdev;
- int msix_id = (vpath->handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- /* Reduce the chance of losing alarm interrupts by masking
-		 * the vector. A pending bit will be set if an alarm is
-		 * generated, and the interrupt will fire once the vector is unmasked.
- */
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
- vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
-
- status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
- vdev->exec_mode);
- if (status == VXGE_HW_OK) {
- vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
- continue;
- }
- vxge_debug_intr(VXGE_ERR,
- "%s: vxge_hw_vpath_alarm_process failed %x ",
- VXGE_DRIVER_NAME, status);
- }
- return IRQ_HANDLED;
-}
-
-static int vxge_alloc_msix(struct vxgedev *vdev)
-{
- int j, i, ret = 0;
- int msix_intr_vect = 0, temp;
- vdev->intr_cnt = 0;
-
-start:
- /* Tx/Rx MSIX Vectors count */
- vdev->intr_cnt = vdev->no_of_vpath * 2;
-
- /* Alarm MSIX Vectors count */
- vdev->intr_cnt++;
-
- vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!vdev->entries) {
- vxge_debug_init(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_entries_failed;
- }
-
- vdev->vxge_entries = kcalloc(vdev->intr_cnt,
- sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
- if (!vdev->vxge_entries) {
- vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- ret = -ENOMEM;
- goto alloc_vxge_entries_failed;
- }
-
- for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
-
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
-
- /* Initialize the fifo vector */
- vdev->entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].entry = msix_intr_vect;
- vdev->vxge_entries[j].in_use = 0;
- j++;
-
- /* Initialize the ring vector */
- vdev->entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].entry = msix_intr_vect + 1;
- vdev->vxge_entries[j].in_use = 0;
- j++;
- }
-
- /* Initialize the alarm vector */
- vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
- vdev->vxge_entries[j].in_use = 0;
-
- ret = pci_enable_msix_range(vdev->pdev,
- vdev->entries, 3, vdev->intr_cnt);
- if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
- } else if (ret < vdev->intr_cnt) {
- pci_disable_msix(vdev->pdev);
-
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if (max_config_vpath != VXGE_USE_DEFAULT) {
- ret = -ENODEV;
- goto enable_msix_failed;
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-		/* Try with fewer vectors by reducing the number of vpaths */
- temp = (ret - 1)/2;
- vxge_close_vpaths(vdev, temp);
- vdev->no_of_vpath = temp;
- goto start;
- }
- return 0;
-
-enable_msix_failed:
- kfree(vdev->vxge_entries);
-alloc_vxge_entries_failed:
- kfree(vdev->entries);
-alloc_entries_failed:
- return ret;
-}
-
-static int vxge_enable_msix(struct vxgedev *vdev)
-{
-
- int i, ret = 0;
- /* 0 - Tx, 1 - Rx */
- int tim_msix_id[4] = {0, 1, 0, 0};
-
- vdev->intr_cnt = 0;
-
- /* allocate msix vectors */
- ret = vxge_alloc_msix(vdev);
- if (!ret) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- struct vxge_vpath *vpath = &vdev->vpaths[i];
-
-			/* If the fifo or ring is not enabled, the MSIX vector
-			 * for it should be set to 0.
- */
- vpath->ring.rx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
-
- vpath->fifo.tx_vector_no = (vpath->device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE);
-
- vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
- VXGE_ALARM_MSIX_ID);
- }
- }
-
- return ret;
-}
-
-static void vxge_rem_msix_isr(struct vxgedev *vdev)
-{
- int intr_cnt;
-
- for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
- intr_cnt++) {
- if (vdev->vxge_entries[intr_cnt].in_use) {
- free_irq(vdev->entries[intr_cnt].vector,
- vdev->vxge_entries[intr_cnt].arg);
- vdev->vxge_entries[intr_cnt].in_use = 0;
- }
- }
-
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- vdev->entries = NULL;
- vdev->vxge_entries = NULL;
-
- if (vdev->config.intr_type == MSI_X)
- pci_disable_msix(vdev->pdev);
-}
-
-static void vxge_rem_isr(struct vxgedev *vdev)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI) &&
- vdev->config.intr_type == MSI_X) {
- vxge_rem_msix_isr(vdev);
- } else if (vdev->config.intr_type == INTA) {
- free_irq(vdev->pdev->irq, vdev);
- }
-}
-
-static int vxge_add_isr(struct vxgedev *vdev)
-{
- int ret = 0;
- int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
- int pci_fun = PCI_FUNC(vdev->pdev->devfn);
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
- ret = vxge_enable_msix(vdev);
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
- vdev->config.intr_type = INTA;
- }
-
- if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
- for (intr_idx = 0;
- intr_idx < (vdev->no_of_vpath *
- VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
-
- msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
- irq_req = 0;
-
- switch (msix_idx) {
- case 0:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_tx_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].fifo;
- irq_req = 1;
- break;
- case 1:
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun, vp_idx);
- ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
- &vdev->vpaths[vp_idx].ring;
- irq_req = 1;
- break;
- }
-
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- if (irq_req) {
-				/* We requested this MSI-X interrupt */
- vdev->vxge_entries[intr_cnt].in_use = 1;
- msix_idx += vdev->vpaths[vp_idx].device_id *
- VXGE_HW_VPATH_MSIX_ACTIVE;
- vxge_hw_vpath_msix_unmask(
- vdev->vpaths[vp_idx].handle,
- msix_idx);
- intr_cnt++;
- }
-
- /* Point to next vpath handler */
- if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
- (vp_idx < (vdev->no_of_vpath - 1)))
- vp_idx++;
- }
-
- intr_cnt = vdev->no_of_vpath * 2;
- snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Alarm - fn:%d",
- vdev->ndev->name,
- vdev->entries[intr_cnt].entry,
- pci_fun);
- /* For Alarm interrupts */
- ret = request_irq(vdev->entries[intr_cnt].vector,
- vxge_alarm_msix_handle, 0,
- vdev->desc[intr_cnt],
- &vdev->vpaths[0]);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSIX - %d Registration failed",
- vdev->ndev->name, intr_cnt);
- vxge_rem_msix_isr(vdev);
- vdev->config.intr_type = INTA;
- vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA",
- vdev->ndev->name);
- goto INTA_MODE;
- }
-
- msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
- VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
- vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- msix_idx);
- vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
- }
-
-INTA_MODE:
- if (vdev->config.intr_type == INTA) {
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
- "%s:vxge:INTA", vdev->ndev->name);
- vxge_hw_device_set_intr_type(vdev->devh,
- VXGE_HW_INTR_MODE_IRQLINE);
-
- vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
-
- ret = request_irq((int) vdev->pdev->irq,
- vxge_isr_napi,
- IRQF_SHARED, vdev->desc[0], vdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s %s-%d: ISR registration failed",
- VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
- return -ENODEV;
- }
- vxge_debug_init(VXGE_TRACE,
- "new %s-%d line allocated",
- "IRQ", vdev->pdev->irq);
- }
-
- return VXGE_HW_OK;
-}
-
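-/* Timer callback, rearmed every HZ/2: reset any vpath flagged in
- * vdev->vp_reset and, when not in MSI-X mode, re-enable device
- * interrupts afterwards.
- */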
-static void vxge_poll_vp_reset(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
- int i, j = 0;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- if (test_bit(i, &vdev->vp_reset)) {
- vxge_reset_vpath(vdev, i);
- j++;
- }
- }
- if (j && (vdev->config.intr_type != MSI_X)) {
- vxge_hw_device_unmask_all(vdev->devh);
- vxge_hw_device_flush_io(vdev->devh);
- }
-
- mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
-}
-
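-/* Timer callback (started only on Titan1, rearmed every millisecond):
- * if a ring made no Rx progress and the hardware leak check has failed
- * on two consecutive checks, flag the vpath for reset, disable its
- * interrupts and stop its Tx queue.
- */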
-static void vxge_poll_vp_lockup(struct timer_list *t)
-{
- struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- struct vxge_ring *ring;
- int i;
- unsigned long rx_frms;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- ring = &vdev->vpaths[i].ring;
-
- /* Frame count, truncated to machine word size */
- rx_frms = READ_ONCE(ring->stats.rx_frms);
-
- /* Did this vpath receive any packets? */
- if (ring->stats.prev_rx_frms == rx_frms) {
- status = vxge_hw_vpath_check_leak(ring->handle);
-
- /* Did it receive any packets last time? */
- if ((VXGE_HW_FAIL == status) &&
- (VXGE_HW_FAIL == ring->last_status)) {
-
- /* schedule vpath reset */
- if (!test_and_set_bit(i, &vdev->vp_reset)) {
- vpath = &vdev->vpaths[i];
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, i);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- continue;
- }
- }
- }
- ring->stats.prev_rx_frms = rx_frms;
- ring->last_status = status;
- }
-
- /* Check every millisecond */
- mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
-}
-
-static netdev_features_t vxge_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t changed = dev->features ^ features;
-
- /* Enabling RTH requires some of the logic in vxge_device_register and a
- * vpath reset. Due to these restrictions, only allow modification
- * while the interface is down.
- */
- if ((changed & NETIF_F_RXHASH) && netif_running(dev))
- features ^= NETIF_F_RXHASH;
-
- return features;
-}
-
-static int vxge_set_features(struct net_device *dev, netdev_features_t features)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- netdev_features_t changed = dev->features ^ features;
-
- if (!(changed & NETIF_F_RXHASH))
- return 0;
-
- /* !netif_running() ensured by vxge_fix_features() */
-
- vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
- vxge_reset_all_vpaths(vdev);
-
- return 0;
-}
-
-/**
- * vxge_open
- * @dev: pointer to the device structure.
- *
- * This function is the open entry point of the driver. It mainly calls a
- * function to allocate Rx buffers and inserts them into the buffer
- * descriptors and then enables the Rx part of the NIC.
- * Return value: '0' on success and an appropriate (-)ve integer as
- * defined in errno.h file on failure.
- */
-static int vxge_open(struct net_device *dev)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- struct vxge_vpath *vpath;
- int ret = 0;
- int i;
- u64 val64;
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d", dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
- /* make sure the link is off by default every time the NIC is
- * initialized */
- netif_carrier_off(dev);
-
- /* Open VPATHs */
- status = vxge_open_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: Vpath open failed", vdev->ndev->name);
- ret = -EPERM;
- goto out0;
- }
-
- vdev->mtu = dev->mtu;
-
- status = vxge_add_isr(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: ISR add failed", dev->name);
- ret = -EPERM;
- goto out1;
- }
-
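- /* With INTA a single NAPI context is shared by all rings; with
- * MSI-X each ring gets its own NAPI context.
- */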
- if (vdev->config.intr_type != MSI_X) {
- netif_napi_add_weight(dev, &vdev->napi, vxge_poll_inta,
- vdev->config.napi_weight);
- napi_enable(&vdev->napi);
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vpath->ring.napi_p = &vdev->napi;
- }
- } else {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- netif_napi_add_weight(dev, &vpath->ring.napi,
- vxge_poll_msix,
- vdev->config.napi_weight);
- napi_enable(&vpath->ring.napi);
- vpath->ring.napi_p = &vpath->ring.napi;
- }
- }
-
- /* configure RTH */
- if (vdev->config.rth_steering) {
- status = vxge_rth_configure(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: RTH configuration failed",
- dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
- printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
- hldev->config.rth_en ? "enabled" : "disabled");
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- /* set initial mtu before enabling the device */
- status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: fatal: can not set new MTU", dev->name);
- ret = -EPERM;
- goto out2;
- }
- }
-
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
- vxge_debug_init(vdev->level_trace,
- "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
- VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
-
- /* Restore the DA, VID table and also multicast and promiscuous mode
- * states
- */
- if (vdev->all_multi_flg) {
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- vxge_restore_vpath_mac_addr(vpath);
- vxge_restore_vpath_vid_table(vpath);
-
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s:%d Enabling multicast failed",
- __func__, __LINE__);
- }
- }
-
- /* Enable vpaths to sniff all unicast/multicast traffic that is not
- * addressed to them. We allow promiscuous mode for the PF only
- */
-
- val64 = 0;
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- val64);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- val64);
-
- vxge_set_multicast(dev);
-
- /* Enable broadcast and multicast for all vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
- status = vxge_hw_vpath_bcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable bcast for vpath "
- "id %d", dev->name, i);
- if (vdev->config.addr_learn_en) {
- status = vxge_hw_vpath_mcast_enable(vpath->handle);
- if (status != VXGE_HW_OK)
- vxge_debug_init(VXGE_ERR,
- "%s : Can not enable mcast for vpath "
- "id %d", dev->name, i);
- }
- }
-
- vxge_hw_device_setpause_data(vdev->devh, 0,
- vdev->config.tx_pause_enable,
- vdev->config.rx_pause_enable);
-
- if (vdev->vp_reset_timer.function == NULL)
- vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
- HZ / 2);
-
- /* There is no need to check for RxD leak and RxD lookup on Titan1A */
- if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
- HZ / 2);
-
- set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- smp_wmb();
-
- if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
- netif_carrier_on(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Up\n");
- vdev->stats.link_up++;
- }
-
- vxge_hw_device_intr_enable(vdev->devh);
-
- smp_wmb();
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- vpath = &vdev->vpaths[i];
-
- vxge_hw_vpath_enable(vpath->handle);
- smp_wmb();
- vxge_hw_vpath_rx_doorbell_init(vpath->handle);
- }
-
- netif_tx_start_all_queues(vdev->ndev);
-
- /* configure CI */
- vxge_config_ci_for_tti_rti(vdev);
-
- goto out0;
-
-out2:
- vxge_rem_isr(vdev);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
-out1:
- vxge_close_vpaths(vdev, 0);
-out0:
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...",
- dev->name, __func__, __LINE__);
- return ret;
-}
-
-/* Loop through the mac address list and delete all the entries */
-static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
-{
-
- struct list_head *entry, *next;
- if (list_empty(&vpath->mac_addr_list))
- return;
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- list_del(entry);
- kfree(entry);
- }
-}
-
-static void vxge_napi_del_all(struct vxgedev *vdev)
-{
- int i;
- if (vdev->config.intr_type != MSI_X)
- netif_napi_del(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- netif_napi_del(&vdev->vpaths[i].ring.napi);
- }
-}
-
-static int do_vxge_close(struct net_device *dev, int do_io)
-{
- enum vxge_hw_status status;
- struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
- int i;
- u64 val64, vpath_vector;
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
- dev->name, __func__, __LINE__);
-
- vdev = netdev_priv(dev);
- hldev = pci_get_drvdata(vdev->pdev);
-
- if (unlikely(!is_vxge_card_up(vdev)))
- return 0;
-
- /* If vxge_handle_crit_err task is executing,
- * wait till it completes. */
- while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
- msleep(50);
-
- if (do_io) {
- /* Put the vpath back in normal mode */
- vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
- status = vxge_hw_mgmt_reg_read(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- &val64);
- if (status == VXGE_HW_OK) {
- val64 &= ~vpath_vector;
- status = vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(
- struct vxge_hw_mrpcim_reg,
- rts_mgr_cbasin_cfg),
- val64);
- }
-
- /* Remove the function 0 from promiscuous mode */
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_addr),
- 0);
-
- vxge_hw_mgmt_reg_write(vdev->devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- (ulong)offsetof(struct vxge_hw_mrpcim_reg,
- rxmac_authorize_all_vid),
- 0);
-
- smp_wmb();
- }
-
- if (vdev->titan1)
- del_timer_sync(&vdev->vp_lockup_timer);
-
- del_timer_sync(&vdev->vp_reset_timer);
-
- if (do_io)
- vxge_hw_device_wait_receive_idle(hldev);
-
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-
- /* Disable napi */
- if (vdev->config.intr_type != MSI_X)
- napi_disable(&vdev->napi);
- else {
- for (i = 0; i < vdev->no_of_vpath; i++)
- napi_disable(&vdev->vpaths[i].ring.napi);
- }
-
- netif_carrier_off(vdev->ndev);
- netdev_notice(vdev->ndev, "Link Down\n");
- netif_tx_stop_all_queues(vdev->ndev);
-
- /* Note that at this point xmit() is stopped by upper layer */
- if (do_io)
- vxge_hw_device_intr_disable(vdev->devh);
-
- vxge_rem_isr(vdev);
-
- vxge_napi_del_all(vdev);
-
- if (do_io)
- vxge_reset_all_vpaths(vdev);
-
- vxge_close_vpaths(vdev, 0);
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
-
- clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
-
- return 0;
-}
-
-/**
- * vxge_close
- * @dev: device pointer.
- *
- * This is the stop entry point of the driver. It needs to undo exactly
- * whatever was done by the open entry point; thus it is usually referred to
- * as the close function. Among other things, this function stops the
- * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
- * Return value: '0' on success and an appropriate (-)ve integer as
- * defined in errno.h file on failure.
- */
-static int vxge_close(struct net_device *dev)
-{
- do_vxge_close(dev, 1);
- return 0;
-}
-
-/**
- * vxge_change_mtu
- * @dev: net device pointer.
- * @new_mtu :the new MTU size for the device.
- *
- * A driver entry point to change MTU size for the device. Before changing
- * the MTU the device must be stopped.
- */
-static int vxge_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d", __func__, __LINE__);
-
- /* check if device is down already */
- if (unlikely(!is_vxge_card_up(vdev))) {
- /* just store the new value; it will be used later in open() */
- dev->mtu = new_mtu;
- vxge_debug_init(vdev->level_err,
- "%s", "device is down on MTU change");
- return 0;
- }
-
- vxge_debug_init(vdev->level_trace,
- "trying to apply new MTU %d", new_mtu);
-
- if (vxge_close(dev))
- return -EIO;
-
- dev->mtu = new_mtu;
- vdev->mtu = new_mtu;
-
- if (vxge_open(dev))
- return -EIO;
-
- vxge_debug_init(vdev->level_trace,
- "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
-
- return 0;
-}
-
-/**
- * vxge_get_stats64
- * @dev: pointer to the device structure
- * @net_stats: pointer to struct rtnl_link_stats64
- *
- */
-static void
-vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- int k;
-
- /* net_stats already zeroed by caller */
- for (k = 0; k < vdev->no_of_vpath; k++) {
- struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
- struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
- unsigned int start;
- u64 packets, bytes, multicast;
-
- do {
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
-
- packets = rxstats->rx_frms;
- multicast = rxstats->rx_mcast;
- bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
-
- net_stats->rx_packets += packets;
- net_stats->rx_bytes += bytes;
- net_stats->multicast += multicast;
-
- net_stats->rx_errors += rxstats->rx_errors;
- net_stats->rx_dropped += rxstats->rx_dropped;
-
- do {
- start = u64_stats_fetch_begin_irq(&txstats->syncp);
-
- packets = txstats->tx_frms;
- bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
-
- net_stats->tx_packets += packets;
- net_stats->tx_bytes += bytes;
- net_stats->tx_errors += txstats->tx_errors;
- }
-}
-
-static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
-{
- enum vxge_hw_status status;
- u64 val64;
-
- /* Timestamp is passed to the driver via the FCS, therefore we
- * must disable the FCS stripping by the adapter. Since this is
- * required for the driver to load (due to a hardware bug),
- * there is no need to do anything special here.
- */
- val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
- VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
- VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
-
- status = vxge_hw_mgmt_reg_write(devh,
- vxge_hw_mgmt_reg_type_mrpcim,
- 0,
- offsetof(struct vxge_hw_mrpcim_reg,
- xmac_timestamp),
- val64);
- vxge_hw_device_flush_io(devh);
- devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
- return status;
-}
-
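-/* SIOCSHWTSTAMP handler: Tx timestamping is not supported, and Rx
- * timestamping is all or nothing: every PTP filter is promoted to
- * HWTSTAMP_FILTER_ALL and requires HWTS to have been enabled at probe
- * time.
- */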
-static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
- int i;
-
- if (copy_from_user(&config, data, sizeof(config)))
- return -EFAULT;
-
- /* Transmit HW Timestamp not supported */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- vdev->rx_hwts = 0;
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- break;
-
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
- return -EFAULT;
-
- vdev->rx_hwts = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
-
- default:
- return -ERANGE;
- }
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
-{
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (vdev->rx_hwts ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- if (copy_to_user(data, &config, sizeof(config)))
- return -EFAULT;
-
- return 0;
-}
-
-/**
- * vxge_ioctl
- * @dev: Device pointer.
- * @rq: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
- * @cmd: This is used to distinguish between the different commands that
- * can be passed to the IOCTL functions.
- *
- * Entry point for the Ioctl.
- */
-static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return vxge_hwtstamp_set(vdev, rq->ifr_data);
- case SIOCGHWTSTAMP:
- return vxge_hwtstamp_get(vdev, rq->ifr_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * vxge_tx_watchdog
- * @dev: pointer to net device structure
- * @txqueue: index of the hanging queue
- *
- * Watchdog for transmit side.
- * This function is triggered if the Tx Queue is stopped
- * for a pre-defined amount of time when the Interface is still up.
- */
-static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
-{
- struct vxgedev *vdev;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
-
- schedule_work(&vdev->reset_task);
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
- * vxge_vlan_rx_add_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Add the vlan id to the device's vlan id table
- */
-static int
-vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
- /* Add this vlan to the vid table of each open vpath */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- set_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-/**
- * vxge_vlan_rx_kill_vid
- * @dev: net device pointer.
- * @proto: vlan protocol
- * @vid: vid
- *
- * Remove the vlan id from the device's vlan id table
- */
-static int
-vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath;
- int vp_id;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- /* Delete this vlan from the vid table */
- for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
- vpath = &vdev->vpaths[vp_id];
- if (!vpath->is_open)
- continue;
- vxge_hw_vpath_vid_delete(vpath->handle, vid);
- }
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
- clear_bit(vid, vdev->active_vlans);
- return 0;
-}
-
-static const struct net_device_ops vxge_netdev_ops = {
- .ndo_open = vxge_open,
- .ndo_stop = vxge_close,
- .ndo_get_stats64 = vxge_get_stats64,
- .ndo_start_xmit = vxge_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = vxge_set_multicast,
- .ndo_eth_ioctl = vxge_ioctl,
- .ndo_set_mac_address = vxge_set_mac_addr,
- .ndo_change_mtu = vxge_change_mtu,
- .ndo_fix_features = vxge_fix_features,
- .ndo_set_features = vxge_set_features,
- .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
- .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
- .ndo_tx_timeout = vxge_tx_watchdog,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = vxge_netpoll,
-#endif
-};
-
-static int vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int no_of_vpath, struct vxgedev **vdev_out)
-{
- struct net_device *ndev;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev;
- int ret = 0, no_of_queue = 1;
- u64 stat;
-
- *vdev_out = NULL;
- if (config->tx_steering_type)
- no_of_queue = no_of_vpath;
-
- ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
- no_of_queue);
- if (ndev == NULL) {
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
- "%s : device allocation failed", __func__);
- ret = -ENODEV;
- goto _out0;
- }
-
- vxge_debug_entryexit(
- vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Entering...",
- ndev->name, __func__, __LINE__);
-
- vdev = netdev_priv(ndev);
- memset(vdev, 0, sizeof(struct vxgedev));
-
- vdev->ndev = ndev;
- vdev->devh = hldev;
- vdev->pdev = hldev->pdev;
- memcpy(&vdev->config, config, sizeof(struct vxge_config));
- vdev->rx_hwts = 0;
- vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
-
- SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
-
- ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_CTAG_TX;
- if (vdev->config.rth_steering != NO_STEERING)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->features |= ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
-
-
- ndev->netdev_ops = &vxge_netdev_ops;
-
- ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- INIT_WORK(&vdev->reset_task, vxge_reset);
-
- vxge_initialize_ethtool_ops(ndev);
-
- /* Allocate memory for vpath */
- vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
- GFP_KERNEL);
- if (!vdev->vpaths) {
- vxge_debug_init(VXGE_ERR,
- "%s: vpath memory allocation failed",
- vdev->ndev->name);
- ret = -ENOMEM;
- goto _out1;
- }
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s : checksumming enabled", __func__);
-
- ndev->features |= NETIF_F_HIGHDMA;
-
- /* MTU range: 68 - 9600 */
- ndev->min_mtu = VXGE_HW_MIN_MTU;
- ndev->max_mtu = VXGE_HW_MAX_MTU;
-
- ret = register_netdev(ndev);
- if (ret) {
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: %s : device registration failed!",
- ndev->name, __func__);
- goto _out2;
- }
-
- /* Set the factory defined MAC address initially */
- ndev->addr_len = ETH_ALEN;
-
- /* Mark the link state as off at this point; when the link-change
- * interrupt arrives the state will automatically be changed to
- * the right state.
- */
- netif_carrier_off(ndev);
-
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s: Ethernet device registered",
- ndev->name);
-
- hldev->ndev = ndev;
- *vdev_out = vdev;
-
- /* Resetting the Device stats */
- status = vxge_hw_mrpcim_stats_access(
- hldev,
- VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
- 0,
- 0,
- &stat);
-
- if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
- vxge_debug_init(
- vxge_hw_device_trace_level_get(hldev),
- "%s: device stats clear returns"
- "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
-
- vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
- "%s: %s:%d Exiting...",
- ndev->name, __func__, __LINE__);
-
- return ret;
-_out2:
- kfree(vdev->vpaths);
-_out1:
- free_netdev(ndev);
-_out0:
- return ret;
-}
-
-/*
- * vxge_device_unregister
- *
- * This function will unregister and free the network device
- */
-static void vxge_device_unregister(struct __vxge_hw_device *hldev)
-{
- struct vxgedev *vdev;
- struct net_device *dev;
- char buf[IFNAMSIZ];
-
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
- __func__, __LINE__);
-
- strlcpy(buf, dev->name, IFNAMSIZ);
-
- flush_work(&vdev->reset_task);
-
- /* unregister_netdev() will call stop() if the device is up */
- unregister_netdev(dev);
-
- kfree(vdev->vpaths);
-
- vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
- buf);
- vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
- __func__, __LINE__);
-
- /* we are safe to free it now */
- free_netdev(dev);
-}
-
-/*
- * vxge_callback_crit_err
- *
- * This function is called by the alarm handler in interrupt context.
- * Driver must analyze it based on the event type.
- */
-static void
-vxge_callback_crit_err(struct __vxge_hw_device *hldev,
- enum vxge_hw_event type, u64 vp_id)
-{
- struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = netdev_priv(dev);
- struct vxge_vpath *vpath = NULL;
- int vpath_idx;
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-
- /* Note: This event type should be used for device wide
- * indications only - Serious errors, Slot freeze and critical errors
- */
- vdev->cric_err_event = type;
-
- for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
- vpath = &vdev->vpaths[vpath_idx];
- if (vpath->device_id == vp_id)
- break;
- }
-
- if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
- if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
- vxge_debug_init(VXGE_ERR,
- "%s: Slot is frozen", vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_SERR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Serious Error",
- vdev->ndev->name);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
- vxge_debug_init(VXGE_ERR,
- "%s: Encountered Critical Error",
- vdev->ndev->name);
- }
-
- if ((type == VXGE_HW_EVENT_SERR) ||
- (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
- vxge_hw_device_mask_all(hldev);
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
- (type == VXGE_HW_EVENT_VPATH_ERR)) {
-
- if (unlikely(vdev->exec_mode))
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
- else {
- /* check if this vpath is already set for reset */
- if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
-
- /* disable interrupts for this vpath */
- vxge_vpath_intr_disable(vdev, vpath_idx);
-
- /* stop the queue for this vpath */
- netif_tx_stop_queue(vpath->fifo.txq);
- }
- }
- }
-
- vxge_debug_entryexit(vdev->level_trace,
- "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-}
-
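-/* Validate the bw_percentage module parameters: if any entry is zero
- * or the entries sum to more than 100%, fall back to an equal split
- * across all vpaths; otherwise spread the unassigned remainder equally
- * over the trailing unspecified entries.
- */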
-static void verify_bandwidth(void)
-{
- int i, band_width, total = 0, equal_priority = 0;
-
- /* 1. If user enters 0 for some fifo, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0) {
- equal_priority = 1;
- break;
- }
- }
-
- if (!equal_priority) {
- /* 2. If sum exceeds 100, give equal priority to all */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (bw_percentage[i] == 0xFF)
- break;
-
- total += bw_percentage[i];
- if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
- equal_priority = 1;
- break;
- }
- }
- }
-
- if (!equal_priority) {
- /* Is all the bandwidth consumed? */
- if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
- if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
- /* Split the rest of the bandwidth equally among the remaining vpaths */
- band_width =
- (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
- (VXGE_HW_MAX_VIRTUAL_PATHS - i);
- if (band_width < 2) /* min of 2% */
- equal_priority = 1;
- else {
- for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
- i++)
- bw_percentage[i] =
- band_width;
- }
- }
- } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
- equal_priority = 1;
- }
-
- if (equal_priority) {
- vxge_debug_init(VXGE_ERR,
- "%s: Assigning equal bandwidth to all the vpaths",
- VXGE_DRIVER_NAME);
- bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
- VXGE_HW_MAX_VIRTUAL_PATHS;
- for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- bw_percentage[i] = bw_percentage[0];
- }
-}
-
-/*
- * Vpath configuration
- */
-static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
- u64 vpath_mask, struct vxge_config *config_param)
-{
- int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
- u32 txdl_size, txdl_per_memblock;
-
- temp = driver_config->vpath_per_dev;
- if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
- (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
- /* No more CPUs. Return vpath count as zero. */
- if (driver_config->g_no_cpus == -1)
- return 0;
-
- if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus =
- netif_get_num_default_rss_queues();
-
- driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
- if (!driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = 1;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- if (vxge_bVALn(vpath_mask, i, 1))
- default_no_vpath++;
-
- if (default_no_vpath < driver_config->vpath_per_dev)
- driver_config->vpath_per_dev = default_no_vpath;
-
- driver_config->g_no_cpus = driver_config->g_no_cpus -
- (driver_config->vpath_per_dev * 2);
- if (driver_config->g_no_cpus <= 0)
- driver_config->g_no_cpus = -1;
- }
-
- if (driver_config->vpath_per_dev == 1) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Disable tx and rx steering, "
- "as single vpath is configured", VXGE_DRIVER_NAME);
- config_param->rth_steering = NO_STEERING;
- config_param->tx_steering_type = NO_STEERING;
- device_config->rth_en = 0;
- }
-
- /* configure bandwidth */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
- device_config->vp_config[i].min_bandwidth = bw_percentage[i];
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- device_config->vp_config[i].vp_id = i;
- device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
- if (no_of_vpaths < driver_config->vpath_per_dev) {
- if (!vxge_bVALn(vpath_mask, i, 1)) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not available",
- VXGE_DRIVER_NAME, i);
- continue;
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d available",
- VXGE_DRIVER_NAME, i);
- no_of_vpaths++;
- }
- } else {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: vpath: %d is not configured, "
- "max_config_vpath exceeded",
- VXGE_DRIVER_NAME, i);
- break;
- }
-
- /* Configure Tx FIFOs */
- device_config->vp_config[i].fifo.enable =
- VXGE_HW_FIFO_ENABLE;
- device_config->vp_config[i].fifo.max_frags =
- MAX_SKB_FRAGS + 1;
- device_config->vp_config[i].fifo.memblock_size =
- VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
-
- txdl_size = device_config->vp_config[i].fifo.max_frags *
- sizeof(struct vxge_hw_fifo_txd);
- txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
-
- device_config->vp_config[i].fifo.fifo_blocks =
- ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
-
- device_config->vp_config[i].fifo.intr =
- VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
-
- /* Configure tti properties */
- device_config->vp_config[i].tti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].tti.btimer_val =
- (VXGE_TTI_BTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- /* For msi-x with napi (each vector has a handler of its own) -
- * Set CI to OFF for all vpaths
- */
- device_config->vp_config[i].tti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].tti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].tti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
-
- device_config->vp_config[i].tti.ltimer_val =
- (VXGE_TTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.rtimer_val =
- (VXGE_TTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
- device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
- device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
- device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
- device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
- device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
- device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
-
- /* Configure Rx rings */
- device_config->vp_config[i].ring.enable =
- VXGE_HW_RING_ENABLE;
-
- device_config->vp_config[i].ring.ring_blocks =
- VXGE_HW_DEF_RING_BLOCKS;
-
- device_config->vp_config[i].ring.buffer_mode =
- VXGE_HW_RING_RXD_BUFFER_MODE_1;
-
- device_config->vp_config[i].ring.rxds_limit =
- VXGE_HW_DEF_RING_RXDS_LIMIT;
-
- device_config->vp_config[i].ring.scatter_mode =
- VXGE_HW_RING_SCATTER_MODE_A;
-
- /* Configure rti properties */
- device_config->vp_config[i].rti.intr_enable =
- VXGE_HW_TIM_INTR_ENABLE;
-
- device_config->vp_config[i].rti.btimer_val =
- (VXGE_RTI_BTIMER_VAL * 1000)/272;
-
- device_config->vp_config[i].rti.timer_ac_en =
- VXGE_HW_TIM_TIMER_AC_ENABLE;
-
- device_config->vp_config[i].rti.timer_ci_en =
- VXGE_HW_TIM_TIMER_CI_DISABLE;
-
- device_config->vp_config[i].rti.timer_ri_en =
- VXGE_HW_TIM_TIMER_RI_DISABLE;
-
- device_config->vp_config[i].rti.util_sel =
- VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
-
- device_config->vp_config[i].rti.urange_a =
- RTI_RX_URANGE_A;
- device_config->vp_config[i].rti.urange_b =
- RTI_RX_URANGE_B;
- device_config->vp_config[i].rti.urange_c =
- RTI_RX_URANGE_C;
- device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
- device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
- device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
- device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
-
- device_config->vp_config[i].rti.rtimer_val =
- (VXGE_RTI_RTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rti.ltimer_val =
- (VXGE_RTI_LTIMER_VAL * 1000) / 272;
-
- device_config->vp_config[i].rpa_strip_vlan_tag =
- vlan_tag_strip;
- }
-
- driver_config->vpath_per_dev = temp;
- return no_of_vpaths;
-}
-
-/* initialize device configurations */
-static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
- int *intr_type)
-{
- /* Used for CQRQ/SRQ. */
- device_config->dma_blockpool_initial =
- VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
-
- device_config->dma_blockpool_max =
- VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
-
- if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
- max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
-
- if (!IS_ENABLED(CONFIG_PCI_MSI)) {
- vxge_debug_init(VXGE_ERR,
- "%s: This Kernel does not support "
- "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
- *intr_type = INTA;
- }
-
- /* Configure interrupt mode: MSI-X or INTA (IRQ line). */
- switch (*intr_type) {
- case INTA:
- device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
- break;
-
- case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
- break;
- }
-
- /* Timer period between device poll */
- device_config->device_poll_millis = VXGE_TIMER_DELAY;
-
- /* Configure mac based steering. */
- device_config->rts_mac_en = addr_learn_en;
-
- /* Configure Vpaths */
- device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
-
- vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
- __func__);
- vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
- device_config->intr_mode);
- vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
- device_config->device_poll_millis);
- vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
- device_config->rth_en);
- vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
- device_config->rth_it_type);
-}
-
-static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
-{
- int i;
-
- vxge_debug_init(VXGE_TRACE,
- "%s: %d Vpath(s) opened",
- vdev->ndev->name, vdev->no_of_vpath);
-
- switch (vdev->config.intr_type) {
- case INTA:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type INTA", vdev->ndev->name);
- break;
-
- case MSI_X:
- vxge_debug_init(VXGE_TRACE,
- "%s: Interrupt type MSI-X", vdev->ndev->name);
- break;
- }
-
- if (vdev->config.rth_steering) {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering enabled for TCP_IPV4",
- vdev->ndev->name);
- } else {
- vxge_debug_init(VXGE_TRACE,
- "%s: RTH steering disabled", vdev->ndev->name);
- }
-
- switch (vdev->config.tx_steering_type) {
- case NO_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- break;
- case TX_PRIORITY_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_VLAN_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Unsupported tx steering option",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- break;
- case TX_MULTIQ_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx multiqueue steering enabled",
- vdev->ndev->name);
- break;
- case TX_PORT_STEERING:
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx port steering enabled",
- vdev->ndev->name);
- break;
- default:
- vxge_debug_init(VXGE_ERR,
- "%s: Unsupported tx steering type",
- vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Tx steering disabled", vdev->ndev->name);
- vdev->config.tx_steering_type = 0;
- }
-
- if (vdev->config.addr_learn_en)
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC Address learning enabled", vdev->ndev->name);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: MTU size - %d", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].mtu);
- vxge_debug_init(VXGE_TRACE,
- "%s: VLAN tag stripping %s", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].rpa_strip_vlan_tag
- ? "Enabled" : "Disabled");
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: Max frags : %d", vdev->ndev->name,
- ((vdev->devh))->
- config.vp_config[i].fifo.max_frags);
- break;
- }
-}
-
-/**
- * vxge_pm_suspend - vxge power management suspend entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
-{
- return -ENOSYS;
-}
-/**
- * vxge_pm_resume - vxge power management resume entry point
- * @dev_d: device pointer
- *
- */
-static int __maybe_unused vxge_pm_resume(struct device *dev_d)
-{
- return -ENOSYS;
-}
-
-/**
- * vxge_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev)) {
- /* Bring down the card, while avoiding PCI I/O */
- do_vxge_close(netdev, 0);
- }
-
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * vxge_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- struct vxgedev *vdev = netdev_priv(netdev);
-
- if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pci_set_master(pdev);
- do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * vxge_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it is OK to resume normal operation.
- */
-static void vxge_io_resume(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
- struct net_device *netdev = hldev->ndev;
-
- if (netif_running(netdev)) {
- if (vxge_open(netdev)) {
- netdev_err(netdev,
- "Can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-}
-
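-/* Map the adapter function mode to the number of PCI functions it
- * exposes; the probe path uses this value minus one as the VF count
- * for pci_enable_sriov().
- */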
-static inline u32 vxge_get_num_vfs(u64 function_mode)
-{
- u32 num_functions = 0;
-
- switch (function_mode) {
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- case VXGE_HW_FUNCTION_MODE_SRIOV_8:
- num_functions = 8;
- break;
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- num_functions = 1;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
- num_functions = 17;
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV_4:
- num_functions = 4;
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
- num_functions = 2;
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV_8:
- num_functions = 8; /* TODO */
- break;
- }
- return num_functions;
-}
-
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
-{
- struct __vxge_hw_device *hldev = vdev->devh;
- u32 maj, min, bld, cmaj, cmin, cbld;
- enum vxge_hw_status status;
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
- if (ret) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
- VXGE_DRIVER_NAME, fw_name);
- goto out;
- }
-
- /* Load the new firmware onto the adapter */
- status = vxge_update_fw_image(hldev, fw->data, fw->size);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FW image download to adapter failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- /* Read the version of the new firmware */
- status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: Upgrade read version failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- cmaj = vdev->config.device_hw_info.fw_version.major;
- cmin = vdev->config.device_hw_info.fw_version.minor;
- cbld = vdev->config.device_hw_info.fw_version.build;
- /* It's possible the version in /lib/firmware is not the latest version.
- * If so, we could get into a loop of trying to upgrade to the latest
- * and flashing the older version.
- */
- if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
- !override) {
- ret = -EINVAL;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
- maj, min, bld);
-
- /* Flash the adapter with the new firmware */
- status = vxge_hw_flash_fw(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
- VXGE_DRIVER_NAME, fw_name);
- ret = -EIO;
- goto out;
- }
-
- printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
- "hard reset before using, thus requiring a system reboot or a "
- "hotplug event.\n");
-
-out:
- release_firmware(fw);
- return ret;
-}
-
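-/* Compare the firmware on the adapter against the version this driver
- * is certified for and, when needed, flash the bundled image from
- * /lib/firmware. An error is returned for firmware that is too new or
- * too old to be usable, and also after a successful flash, since the
- * adapter must be hard reset before it can be used.
- */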
-static int vxge_probe_fw_update(struct vxgedev *vdev)
-{
- u32 maj, min, bld;
- int ret, gpxe = 0;
- char *fw_name;
-
- maj = vdev->config.device_hw_info.fw_version.major;
- min = vdev->config.device_hw_info.fw_version.minor;
- bld = vdev->config.device_hw_info.fw_version.build;
-
- if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
- return 0;
-
- /* Ignore the build number when determining if the current firmware is
- * "too new" to load the driver
- */
- if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
- "version, unable to load driver\n",
- VXGE_DRIVER_NAME);
- return -EINVAL;
- }
-
- /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
- * work with this driver.
- */
- if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
- "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
- /* Determine whether a gPXE EPROM image is present, to pick the firmware file */
- if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
- int i;
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
- if (vdev->devh->eprom_versions[i]) {
- gpxe = 1;
- break;
- }
- }
- if (gpxe)
- fw_name = "vxge/X3fw-pxe.ncf";
- else
- fw_name = "vxge/X3fw.ncf";
-
- ret = vxge_fw_upgrade(vdev, fw_name, 0);
- /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
- * probe, so ignore them
- */
- if (ret != -EINVAL && ret != -ENOENT)
- return -EIO;
- else
- ret = 0;
-
- if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
- VXGE_FW_VER(maj, min, 0)) {
- vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.",
- VXGE_DRIVER_NAME, maj, min, bld);
- return -EINVAL;
- }
-
- return ret;
-}
-
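-/* Return 1 if SR-IOV has already been enabled on this device, i.e. the
- * VF Enable bit is set in its SR-IOV capability, 0 otherwise.
- */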
-static int is_sriov_initialized(struct pci_dev *pdev)
-{
- int pos;
- u16 ctrl;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
- if (ctrl & PCI_SRIOV_CTRL_VFE)
- return 1;
- }
- return 0;
-}
-
-static const struct vxge_hw_uld_cbs vxge_callbacks = {
- .link_up = vxge_callback_link_up,
- .link_down = vxge_callback_link_down,
- .crit_err = vxge_callback_crit_err,
-};
-
-/**
- * vxge_probe
- * @pdev : structure containing the PCI related information of the device.
- * @pre: the matching entry in vxge_id_table, the list of PCI devices
- * supported by the driver.
- * Description:
- * This function is called when a new PCI device gets detected and initializes
- * it.
- * Return value:
- * returns 0 on success and negative on failure.
- *
- */
-static int
-vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
-{
- struct __vxge_hw_device *hldev;
- enum vxge_hw_status status;
- int ret;
- u64 vpath_mask = 0;
- struct vxgedev *vdev;
- struct vxge_config *ll_config = NULL;
- struct vxge_hw_device_config *device_config = NULL;
- struct vxge_hw_device_attr attr;
- int i, j, no_of_vpath = 0, max_vpath_supported = 0;
- u8 *macaddr;
- struct vxge_mac_addrs *entry;
- static int bus = -1, device = -1;
- u32 host_type;
- u8 new_device = 0;
- enum vxge_hw_status is_privileged;
- u32 function_mode;
- u32 num_vfs = 0;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- attr.pdev = pdev;
-
- /* In SRIOV-17 mode, functions of the same adapter
- * can be deployed on different buses
- */
- if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
- !pdev->is_virtfn)
- new_device = 1;
-
- bus = pdev->bus->number;
- device = PCI_SLOT(pdev->devfn);
-
- if (new_device) {
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt !=
- driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME,
- driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
- driver_config->config_dev_cnt = 0;
- driver_config->total_dev_cnt = 0;
- }
-
- /* Make the CPU-based vpath count calculation applicable
- * to individual functions as well.
- */
- driver_config->g_no_cpus = 0;
- driver_config->vpath_per_dev = max_config_vpath;
-
- driver_config->total_dev_cnt++;
- if (++driver_config->config_dev_cnt > max_config_dev) {
- ret = 0;
- goto _exit0;
- }
-
- device_config = kzalloc(sizeof(struct vxge_hw_device_config),
- GFP_KERNEL);
- if (!device_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
-
- ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
- if (!ll_config) {
- ret = -ENOMEM;
- vxge_debug_init(VXGE_ERR,
- "device_config : malloc failed %s %d",
- __FILE__, __LINE__);
- goto _exit0;
- }
- ll_config->tx_steering_type = TX_MULTIQ_STEERING;
- ll_config->intr_type = MSI_X;
- ll_config->napi_weight = NAPI_POLL_WEIGHT;
- ll_config->rth_steering = RTH_STEERING;
-
- /* get the default configuration parameters */
- vxge_hw_device_config_default_get(device_config);
-
- /* initialize configuration parameters */
- vxge_device_config_init(device_config, &ll_config->intr_type);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : can not enable PCI device", __func__);
- goto _exit0;
- }
-
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s : using 64bit DMA", __func__);
- } else {
- ret = -ENOMEM;
- goto _exit1;
- }
-
- ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
- if (ret) {
- vxge_debug_init(VXGE_ERR,
- "%s : request regions failed", __func__);
- goto _exit1;
- }
-
- pci_set_master(pdev);
-
- attr.bar0 = pci_ioremap_bar(pdev, 0);
- if (!attr.bar0) {
- vxge_debug_init(VXGE_ERR,
- "%s : cannot remap io memory bar0", __func__);
- ret = -ENODEV;
- goto _exit2;
- }
- vxge_debug_ll_config(VXGE_TRACE,
- "pci ioremap bar0: %p:0x%llx",
- attr.bar0,
- (unsigned long long)pci_resource_start(pdev, 0));
-
- status = vxge_hw_device_hw_info_get(attr.bar0,
- &ll_config->device_hw_info);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: Reading of hardware info failed."
- "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vpath_mask = ll_config->device_hw_info.vpath_mask;
- if (vpath_mask == 0) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s: No vpaths available in device", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
- vxge_debug_ll_config(VXGE_TRACE,
- "%s:%d Vpath mask = %llx", __func__, __LINE__,
- (unsigned long long)vpath_mask);
-
- function_mode = ll_config->device_hw_info.function_mode;
- host_type = ll_config->device_hw_info.host_type;
- is_privileged = __vxge_hw_device_is_privilaged(host_type,
- ll_config->device_hw_info.func_id);
-
- /* Check how many vpaths are available */
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!((vpath_mask) & vxge_mBIT(i)))
- continue;
- max_vpath_supported++;
- }
-
- if (new_device)
- num_vfs = vxge_get_num_vfs(function_mode) - 1;
-
- /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
- (ll_config->intr_type != INTA)) {
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed in enabling SRIOV mode: %d\n", ret);
- /* No need to fail out, as an error here is non-fatal */
- }
-
- /*
- * Configure vpaths and get driver configured number of vpaths
- * which is less than or equal to the maximum vpaths per function.
- */
- no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
- if (!no_of_vpath) {
- vxge_debug_ll_config(VXGE_ERR,
- "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
- ret = 0;
- goto _exit3;
- }
-
- /* Setting driver callbacks */
- attr.uld_callbacks = &vxge_callbacks;
-
- status = vxge_hw_device_initialize(&hldev, &attr, device_config);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
- }
-
- if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
- ll_config->device_hw_info.fw_version.minor,
- ll_config->device_hw_info.fw_version.build) >=
- VXGE_EPROM_FW_VER) {
- struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
-
- status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
- VXGE_DRIVER_NAME);
- /* This is a non-fatal error, continue */
- }
-
- for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
- hldev->eprom_versions[i] = img[i].version;
- if (!img[i].is_valid)
- break;
- vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
- VXGE_EPROM_IMG_MAJOR(img[i].version),
- VXGE_EPROM_IMG_MINOR(img[i].version),
- VXGE_EPROM_IMG_FIX(img[i].version),
- VXGE_EPROM_IMG_BUILD(img[i].version));
- }
- }
-
- /* If FCS stripping is not disabled in the MAC, fail the driver load */
- status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
- " failing driver load", VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit4;
- }
-
- /* Always enable HWTS. This will always cause the FCS to be invalid,
- * due to the fact that HWTS is using the FCS as the location of the
- * timestamp. The HW FCS checking will still correctly determine if
- * there is a valid checksum, and the FCS is being removed by the driver
- * anyway. So no functionality is being lost. Since it is always
- * enabled, we now simply use the ioctl call to set whether or not the
- * driver should be paying attention to the HWTS.
- */
- if (is_privileged == VXGE_HW_OK) {
- status = vxge_timestamp_config(hldev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
- VXGE_DRIVER_NAME);
- ret = -EFAULT;
- goto _exit4;
- }
- }
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
-
- /* set private device info */
- pci_set_drvdata(pdev, hldev);
-
- ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
- ll_config->addr_learn_en = addr_learn_en;
- ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = 1;
- ll_config->rth_hash_type_ipv4 = 0;
- ll_config->rth_hash_type_tcpipv6 = 0;
- ll_config->rth_hash_type_ipv6 = 0;
- ll_config->rth_hash_type_tcpipv6ex = 0;
- ll_config->rth_hash_type_ipv6ex = 0;
- ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
- ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-
- ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev);
- if (ret) {
- ret = -EINVAL;
- goto _exit4;
- }
-
- ret = vxge_probe_fw_update(vdev);
- if (ret)
- goto _exit5;
-
- vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- /* set private HW device info */
- vdev->mtu = VXGE_HW_DEFAULT_MTU;
- vdev->bar0 = attr.bar0;
- vdev->max_vpath_supported = max_vpath_supported;
- vdev->no_of_vpath = no_of_vpath;
-
- /* Virtual Path count */
- for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- if (!vxge_bVALn(vpath_mask, i, 1))
- continue;
- if (j >= vdev->no_of_vpath)
- break;
-
- vdev->vpaths[j].is_configured = 1;
- vdev->vpaths[j].device_id = i;
- vdev->vpaths[j].ring.driver_id = j;
- vdev->vpaths[j].vdev = vdev;
- vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
- memcpy((u8 *)vdev->vpaths[j].macaddr,
- ll_config->device_hw_info.mac_addrs[i],
- ETH_ALEN);
-
- /* Initialize the mac address list header */
- INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
-
- vdev->vpaths[j].mac_addr_cnt = 0;
- vdev->vpaths[j].mcast_addr_cnt = 0;
- j++;
- }
- vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
- vdev->max_config_port = max_config_port;
-
- vdev->vlan_tag_strip = vlan_tag_strip;
-
- /* map the hashing selector table to the configured vpaths */
- for (i = 0; i < vdev->no_of_vpath; i++)
- vdev->vpath_selector[i] = vpath_selector[i];
-
- macaddr = (u8 *)vdev->vpaths[0].macaddr;
-
- ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
-
- vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.serial_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
- vdev->ndev->name, ll_config->device_hw_info.part_number);
-
- vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
- vdev->ndev->name, ll_config->device_hw_info.product_desc);
-
- vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
- vdev->ndev->name, macaddr);
-
- vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
- vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
-
- vxge_debug_init(VXGE_TRACE,
- "%s: Firmware version : %s Date : %s", vdev->ndev->name,
- ll_config->device_hw_info.fw_version.version,
- ll_config->device_hw_info.fw_date.date);
-
- if (new_device) {
- switch (ll_config->device_hw_info.function_mode) {
- case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Function Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_SRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
- break;
- case VXGE_HW_FUNCTION_MODE_MRIOV:
- vxge_debug_init(VXGE_TRACE,
- "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
- break;
- }
- }
-
- vxge_print_parm(vdev, vpath_mask);
-
- /* Store the fw version for the ethtool option */
- strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
-
- /* Copy the station mac address to the list */
- for (i = 0; i < vdev->no_of_vpath; i++) {
- entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
- if (NULL == entry) {
- vxge_debug_init(VXGE_ERR,
- "%s: mac_addr_list : memory allocation failed",
- vdev->ndev->name);
- ret = -EPERM;
- goto _exit6;
- }
- macaddr = (u8 *)&entry->macaddr;
- memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
- list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
- vdev->vpaths[i].mac_addr_cnt = 1;
- }
-
- kfree(device_config);
-
- /*
- * INTA is shared in multi-function mode. This is unlike the INTA
- * implementation in MR mode, where each VH has its own INTA message.
- * - INTA is masked (disabled) as long as at least one function sets
- * its TITAN_MASK_ALL_INT.ALARM bit.
- * - INTA is unmasked (enabled) when all enabled functions have cleared
- * their own TITAN_MASK_ALL_INT.ALARM bit.
- * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
- * Though this driver leaves the top level interrupts unmasked while
- * leaving the required module interrupt bits masked on exit, there
- * could be a rogue driver around that does not follow this procedure,
- * resulting in a failure to generate interrupts. The following code is
- * present to prevent such a failure.
- */
-
- if (ll_config->device_hw_info.function_mode ==
- VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
- if (vdev->config.intr_type == INTA)
- vxge_hw_device_unmask_all(hldev);
-
- vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
- vdev->ndev->name, __func__, __LINE__);
-
- vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
- vxge_hw_device_trace_level_get(hldev));
-
- kfree(ll_config);
- return 0;
-
-_exit6:
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-_exit5:
- vxge_device_unregister(hldev);
-_exit4:
- vxge_hw_device_terminate(hldev);
- pci_disable_sriov(pdev);
-_exit3:
- iounmap(attr.bar0);
-_exit2:
- pci_release_region(pdev, 0);
-_exit1:
- pci_disable_device(pdev);
-_exit0:
- kfree(ll_config);
- kfree(device_config);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
- return ret;
-}
-
-/**
- * vxge_remove - Free the PCI device
- * @pdev: structure containing the PCI-related information of the device.
- * Description: This function is called by the PCI subsystem to release a
- * PCI device and free up all resources held by the device.
- */
-static void vxge_remove(struct pci_dev *pdev)
-{
- struct __vxge_hw_device *hldev;
- struct vxgedev *vdev;
- int i;
-
- hldev = pci_get_drvdata(pdev);
- if (hldev == NULL)
- return;
-
- vdev = netdev_priv(hldev->ndev);
-
- vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
- vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
- __func__);
-
- for (i = 0; i < vdev->no_of_vpath; i++)
- vxge_free_mac_add_list(&vdev->vpaths[i]);
-
- vxge_device_unregister(hldev);
- /* Do not call pci_disable_sriov here, as it will break child devices */
- vxge_hw_device_terminate(hldev);
- iounmap(vdev->bar0);
- pci_release_region(pdev, 0);
- pci_disable_device(pdev);
- driver_config->config_dev_cnt--;
- driver_config->total_dev_cnt--;
-
- vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
- __func__, __LINE__);
- vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
- __LINE__);
-}
-
-static const struct pci_error_handlers vxge_err_handler = {
- .error_detected = vxge_io_error_detected,
- .slot_reset = vxge_io_slot_reset,
- .resume = vxge_io_resume,
-};
-
-static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
-
-static struct pci_driver vxge_driver = {
- .name = VXGE_DRIVER_NAME,
- .id_table = vxge_id_table,
- .probe = vxge_probe,
- .remove = vxge_remove,
- .driver.pm = &vxge_pm_ops,
- .err_handler = &vxge_err_handler,
-};
-
-static int __init
-vxge_starter(void)
-{
- int ret = 0;
-
- pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
- pr_info("Driver version: %s\n", DRV_VERSION);
-
- verify_bandwidth();
-
- driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
- if (!driver_config)
- return -ENOMEM;
-
- ret = pci_register_driver(&vxge_driver);
- if (ret) {
- kfree(driver_config);
- goto err;
- }
-
- if (driver_config->config_dev_cnt &&
- (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
- vxge_debug_init(VXGE_ERR,
- "%s: Configured %d of %d devices",
- VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
- driver_config->total_dev_cnt);
-err:
- return ret;
-}
-
-static void __exit
-vxge_closer(void)
-{
- pci_unregister_driver(&vxge_driver);
- kfree(driver_config);
-}
-module_init(vxge_starter);
-module_exit(vxge_closer);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
deleted file mode 100644
index da9d2c191828..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ /dev/null
@@ -1,516 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_MAIN_H
-#define VXGE_MAIN_H
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-version.h"
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
-
-#define VXGE_DRIVER_NAME "vxge"
-#define VXGE_DRIVER_VENDOR "Neterion, Inc"
-#define VXGE_DRIVER_FW_VERSION_MAJOR 1
-
-#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
- VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
- VXGE_VERSION_FOR
-
-#define PCI_DEVICE_ID_TITAN_WIN 0x5733
-#define PCI_DEVICE_ID_TITAN_UNI 0x5833
-#define VXGE_HW_TITAN1_PCI_REVISION 1
-#define VXGE_HW_TITAN1A_PCI_REVISION 2
-
-#define VXGE_USE_DEFAULT 0xffffffff
-#define VXGE_HW_VPATH_MSIX_ACTIVE 4
-#define VXGE_ALARM_MSIX_ID 2
-#define VXGE_HW_RXSYNC_FREQ_CNT 4
-#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
-#define VXGE_LL_RX_COPY_THRESHOLD 256
-#define VXGE_DEF_FIFO_LENGTH 84
-
-#define NO_STEERING 0
-#define PORT_STEERING 0x1
-#define RTH_STEERING 0x2
-#define RX_TOS_STEERING 0x3
-#define RX_VLAN_STEERING 0x4
-#define RTH_BUCKET_SIZE 4
-
-#define TX_PRIORITY_STEERING 1
-#define TX_VLAN_STEERING 2
-#define TX_PORT_STEERING 3
-#define TX_MULTIQ_STEERING 4
-
-#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
-
-#define VXGE_TTI_BTIMER_VAL 250000
-
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_T1A_TTI_LTIMER_VAL 80
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_TTI_RTIMER_ADAPT_VAL 10
-#define VXGE_T1A_TTI_RTIMER_VAL 400
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_RTI_RTIMER_ADAPT_VAL 15
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
-#define VXGE_ISR_POLLING_CNT 8
-#define VXGE_MAX_CONFIG_DEV 0xFF
-#define VXGE_EXEC_MODE_DISABLE 0
-#define VXGE_EXEC_MODE_ENABLE 1
-#define VXGE_MAX_CONFIG_PORT 1
-#define VXGE_ALL_VID_DISABLE 0
-#define VXGE_ALL_VID_ENABLE 1
-#define VXGE_PAUSE_CTRL_DISABLE 0
-#define VXGE_PAUSE_CTRL_ENABLE 1
-
-#define TTI_TX_URANGE_A 5
-#define TTI_TX_URANGE_B 15
-#define TTI_TX_URANGE_C 40
-#define TTI_TX_UFC_A 5
-#define TTI_TX_UFC_B 40
-#define TTI_TX_UFC_C 60
-#define TTI_TX_UFC_D 100
-#define TTI_T1A_TX_UFC_A 30
-#define TTI_T1A_TX_UFC_B 80
-/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
-/* Slope - 93 */
-/* 60 - 9k Mtu, 140 - 1.5k mtu */
-#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
-
-/* Slope - 37 */
-/* 100 - 9k Mtu, 300 - 1.5k mtu */
-#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
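As a rough worked example of the two slope formulas above (assuming VXGE_HW_MAX_MTU is the adapter's 9600-byte jumbo limit, which is defined elsewhere, and using C integer division):

	TTI_T1A_TX_UFC_C(1500) = 60 + (9600 - 1500) / 93  = 60 + 87  = 147	(~140 for a 1.5k MTU)
	TTI_T1A_TX_UFC_C(9000) = 60 + (9600 - 9000) / 93  = 60 + 6   = 66	(~60 for a 9k MTU)
	TTI_T1A_TX_UFC_D(1500) = 100 + (9600 - 1500) / 37 = 100 + 218 = 318	(~300 for a 1.5k MTU)

so a smaller MTU yields a proportionally larger utilization frame count (UFC) threshold before a transmit interrupt is forced.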
-
-
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_T1A_RX_URANGE_A 1
-#define RTI_T1A_RX_URANGE_B 20
-#define RTI_T1A_RX_URANGE_C 50
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
-#define RTI_T1A_RX_UFC_B 20
-#define RTI_T1A_RX_UFC_C 50
-#define RTI_T1A_RX_UFC_D 60
-
-/*
- * The moderation parameters keep the interrupt rate at roughly 3k per
- * second for most, but not all, traffic. These values are the maximum
- * interrupt count allowed in a 10 millisecond period, per function with
- * INTA or per vector in the case of MSI-X. Enabled only for Titan 1A.
- */
-#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
-#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
-
-/* Timer period in milliseconds */
-#define VXGE_TIMER_DELAY 10000
-
-#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
-
-#define is_sriov(function_mode) \
- ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
- (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
-
-enum vxge_reset_event {
- /* reset events */
- VXGE_LL_VPATH_RESET = 0,
- VXGE_LL_DEVICE_RESET = 1,
- VXGE_LL_FULL_RESET = 2,
- VXGE_LL_START_RESET = 3,
- VXGE_LL_COMPL_RESET = 4
-};
-/* These flags represent the device's temporary state */
-enum vxge_device_state_t {
- __VXGE_STATE_RESET_CARD = 0,
- __VXGE_STATE_CARD_UP
-};
-
-enum vxge_mac_addr_state {
- /* mac address states */
- VXGE_LL_MAC_ADDR_IN_LIST = 0,
- VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
-};
-
-struct vxge_drv_config {
- int config_dev_cnt;
- int total_dev_cnt;
- int g_no_cpus;
- unsigned int vpath_per_dev;
-};
-
-struct macInfo {
- unsigned char macaddr[ETH_ALEN];
- unsigned char macmask[ETH_ALEN];
- unsigned int vpath_no;
- enum vxge_mac_addr_state state;
-};
-
-struct vxge_config {
- int tx_pause_enable;
- int rx_pause_enable;
- int napi_weight;
- int intr_type;
-#define INTA 0
-#define MSI 1
-#define MSI_X 2
-
- int addr_learn_en;
-
- u32 rth_steering:2,
- rth_algorithm:2,
- rth_hash_type_tcpipv4:1,
- rth_hash_type_ipv4:1,
- rth_hash_type_tcpipv6:1,
- rth_hash_type_ipv6:1,
- rth_hash_type_tcpipv6ex:1,
- rth_hash_type_ipv6ex:1,
- rth_bkt_sz:8;
- int rth_jhash_golden_ratio;
- int tx_steering_type;
- int fifo_indicate_max_pkts;
- struct vxge_hw_device_hw_info device_hw_info;
-};
-
-struct vxge_msix_entry {
- /* Mimicking the kernel's msix_entry struct. */
- u16 vector;
- u16 entry;
- u16 in_use;
- void *arg;
-};
-
-/* Software Statistics */
-
-struct vxge_sw_stats {
-
- /* Virtual Path */
- unsigned long vpaths_open;
- unsigned long vpath_open_fail;
-
- /* Misc. */
- unsigned long link_up;
- unsigned long link_down;
-};
-
-struct vxge_mac_addrs {
- struct list_head item;
- u64 macaddr;
- u64 macmask;
- enum vxge_mac_addr_state state;
-};
-
-struct vxgedev;
-
-struct vxge_fifo_stats {
- struct u64_stats_sync syncp;
- u64 tx_frms;
- u64 tx_bytes;
-
- unsigned long tx_errors;
- unsigned long txd_not_free;
- unsigned long txd_out_of_desc;
- unsigned long pci_map_fail;
-};
-
-struct vxge_fifo {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_fifo *handle;
- struct netdev_queue *txq;
-
- int tx_steering_type;
- int indicate_max_pkts;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- u32 tx_vector_no;
- /* Tx stats */
- struct vxge_fifo_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_ring_stats {
- struct u64_stats_sync syncp;
- u64 rx_frms;
- u64 rx_mcast;
- u64 rx_bytes;
-
- unsigned long rx_errors;
- unsigned long rx_dropped;
- unsigned long prev_rx_frms;
- unsigned long pci_map_fail;
- unsigned long skb_alloc_fail;
-};
-
-struct vxge_ring {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_ring *handle;
- /* The vpath id maintained in the driver -
- * 0 to 'maximum_vpaths_in_function - 1'
- */
- int driver_id;
-
- /* Adaptive interrupt moderation parameters used in T1A */
- unsigned long interrupt_count;
- unsigned long jiffies;
-
- /* copy of the flag indicating whether rx_hwts is to be used */
- u32 rx_hwts:1;
-
- int pkts_processed;
- int budget;
-
- struct napi_struct napi;
- struct napi_struct *napi_p;
-
-#define VXGE_MAX_MAC_ADDR_COUNT 30
-
- int vlan_tag_strip;
- u32 rx_vector_no;
- enum vxge_hw_status last_status;
-
- /* Rx stats */
- struct vxge_ring_stats stats;
-} ____cacheline_aligned;
-
-struct vxge_vpath {
- struct vxge_fifo fifo;
- struct vxge_ring ring;
-
- struct __vxge_hw_vpath_handle *handle;
-
- /* Actual vpath id for this vpath in the device - 0 to 16 */
- int device_id;
- int max_mac_addr_cnt;
- int is_configured;
- int is_open;
- struct vxgedev *vdev;
- u8 macaddr[ETH_ALEN];
- u8 macmask[ETH_ALEN];
-
-#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
- /* mac addresses currently programmed into NIC */
- u16 mac_addr_cnt;
- u16 mcast_addr_cnt;
- struct list_head mac_addr_list;
-
- u32 level_err;
- u32 level_trace;
-};
-#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
- for (i = 0; i < vdev->no_of_vpath; i++) { \
- vdev->vpaths[i].level_err = err; \
- vdev->vpaths[i].level_trace = trace; \
- } \
- vdev->level_err = err; \
- vdev->level_trace = trace; \
-}
-
-struct vxgedev {
- struct net_device *ndev;
- struct pci_dev *pdev;
- struct __vxge_hw_device *devh;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- int vlan_tag_strip;
- struct vxge_config config;
- unsigned long state;
-
- /* Indicates which vpath to reset */
- unsigned long vp_reset;
-
- /* Timer used for polling vpath resets */
- struct timer_list vp_reset_timer;
-
- /* Timer used for polling vpath lockup */
- struct timer_list vp_lockup_timer;
-
- /*
- * Flag to track whether the device is in all-multicast
- * or promiscuous mode.
- */
- u16 all_multi_flg;
-
- /* A flag indicating whether rx_hwts is to be used or not. */
- u32 rx_hwts:1,
- titan1:1;
-
- struct vxge_msix_entry *vxge_entries;
- struct msix_entry *entries;
- /*
- * 4 MSI-X vectors for each of the 17 vpaths;
- * total is 68
- */
-#define VXGE_MAX_REQUESTED_MSIX 68
-#define VXGE_INTR_STRLEN 80
- char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
-
- enum vxge_hw_event cric_err_event;
-
- int max_vpath_supported;
- int no_of_vpath;
-
- struct napi_struct napi;
- /* A debug option. When enabled, and if an error condition occurs,
- * the driver will take the following steps:
- * - mask all interrupts
- * - not clear the source of the alarm
- * - gracefully stop all I/O
- * A diagnostic dump of registers and stats at this point
- * reveals very useful information.
- */
- int exec_mode;
- int max_config_port;
- struct vxge_vpath *vpaths;
-
- struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
- void __iomem *bar0;
- struct vxge_sw_stats stats;
- int mtu;
- /* The variables below are used for vpath selection when transmitting a packet */
- u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
- u64 vpaths_deployed;
-
- u32 intr_cnt;
- u32 level_err;
- u32 level_trace;
- char fw_version[VXGE_HW_FW_STRLEN];
- struct work_struct reset_task;
-};
-
-struct vxge_rx_priv {
- struct sk_buff *skb;
- unsigned char *skb_data;
- dma_addr_t data_dma;
- dma_addr_t data_size;
-};
-
-struct vxge_tx_priv {
- struct sk_buff *skb;
- dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
-};
-
-#define VXGE_MODULE_PARAM_INT(p, val) \
- static int p = val; \
- module_param(p, int, 0)
-
-static inline
-void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *),
- unsigned long timeout)
-{
- timer_setup(timer, func, 0);
- mod_timer(timer, jiffies + timeout);
-}
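A minimal usage sketch for vxge_os_timer() (the callback name and the 500 ms period here are illustrative, not taken from the driver):

	static void my_poll(struct timer_list *t)
	{
		struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);

		/* inspect vdev->vp_reset, schedule work, etc. */
		mod_timer(&vdev->vp_reset_timer, jiffies + msecs_to_jiffies(500));
	}

	/* arm the timer once; the callback re-arms itself to keep polling */
	vxge_os_timer(&vdev->vp_reset_timer, my_poll, msecs_to_jiffies(500));

The helper only sets up and arms the timer; periodic behaviour comes from the callback calling mod_timer() again.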
-
-void vxge_initialize_ethtool_ops(struct net_device *ndev);
-int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-
-/* #define VXGE_DEBUG_INIT: debug for initialization functions
- * #define VXGE_DEBUG_TX : debug transmit related functions
- * #define VXGE_DEBUG_RX : debug receive related functions
- * #define VXGE_DEBUG_MEM : debug memory module
- * #define VXGE_DEBUG_LOCK: debug locks
- * #define VXGE_DEBUG_SEM : debug semaphore
- * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
-*/
-#define VXGE_DEBUG_INIT 0x00000001
-#define VXGE_DEBUG_TX 0x00000002
-#define VXGE_DEBUG_RX 0x00000004
-#define VXGE_DEBUG_MEM 0x00000008
-#define VXGE_DEBUG_LOCK 0x00000010
-#define VXGE_DEBUG_SEM 0x00000020
-#define VXGE_DEBUG_ENTRYEXIT 0x00000040
-#define VXGE_DEBUG_INTR 0x00000080
-#define VXGE_DEBUG_LL_CONFIG 0x00000100
-
-/* Debug tracing for VXGE driver */
-#ifndef VXGE_DEBUG_MASK
-#define VXGE_DEBUG_MASK 0x0
-#endif
-
-#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
-#define vxge_debug_ll_config(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_ll_config(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
-#define vxge_debug_init(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_init(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
-#define vxge_debug_tx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_tx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
-#define vxge_debug_rx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_rx(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
-#define vxge_debug_mem(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_mem(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
-#define vxge_debug_entryexit(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_entryexit(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
-#define vxge_debug_intr(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
-#else
-#define vxge_debug_intr(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
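For reference, all of the debug families above are gated at compile time by VXGE_DEBUG_MASK; a build-time sketch (the mask value and the ring/fifo variables are only examples):

	/* e.g. passed to the build as -DVXGE_DEBUG_MASK='(VXGE_DEBUG_INIT | VXGE_DEBUG_RX)' */

	/* with that mask this expands to a real vxge_debug_ll() call ... */
	vxge_debug_rx(VXGE_TRACE, "%s: posted %d RxDs", ring->ndev->name, count);

	/* ... while unselected families compile down to no_printk() */
	vxge_debug_tx(VXGE_TRACE, "%s: fifo full", fifo->ndev->name);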
-
-#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
- vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
- level, mask);\
- VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
- vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
- vdev->devh), \
- vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
- vdev->devh));\
-}
-
-#ifdef NETIF_F_GSO
-#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
-#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
-#endif
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
deleted file mode 100644
index 3e658b175947..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-reg.h
+++ /dev/null
@@ -1,4636 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
- * Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_REG_H
-#define VXGE_REG_H
-
-/*
- * vxge_mBIT(loc) - set bit at offset
- */
-#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
-
-/*
- * vxge_vBIT(val, loc, sz) - set bits at offset
- */
-#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
-#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
-
-/*
- * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
- */
-#define vxge_bVALn(bits, loc, n) \
- ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))
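These helpers use MSB-0 numbering: bit 0 is the left-most (most significant) bit of the 64-bit register word, so the (loc, sz) pairs used throughout this header count from the left. A small illustrative check (the values are chosen only for the example):

	u64 field = vxge_vBIT(0x5733, 0, 16);	/* 0x5733 placed in bits 0..15 -> 0x5733000000000000ULL */
	u64 value = vxge_bVALn(field, 0, 16);	/* reads back 0x5733 */
	u64 bit0  = vxge_mBIT(0);		/* 0x8000000000000000ULL */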
-
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
- vxge_bVALn(bits, 56, 8)
-
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
- vxge_bVALn(bits, 3, 5)
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
- vxge_bVALn(bits, 5, 3)
-#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
-
-#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
-#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
-#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
-
-#define VXGE_HW_FW_API_GET_EPROM_REV 31
-
-#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
-#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
-#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
-#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
-
-#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
-#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
-#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
-
-#define VXGE_HW_FW_API_GET_FUNC_MODE 29
-#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
-
-#define VXGE_HW_FW_UPGRADE_MEMO 13
-#define VXGE_HW_FW_UPGRADE_ACTION 16
-#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
-#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
-#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
-#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
-
-#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
-#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
-#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
-
-#define VXGE_HW_ASIC_MODE_RESERVED 0
-#define VXGE_HW_ASIC_MODE_NO_IOV 1
-#define VXGE_HW_ASIC_MODE_SR_IOV 2
-#define VXGE_HW_ASIC_MODE_MR_IOV 3
-
-#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
-#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
-#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
-
-#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
-
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
- vxge_bVALn(bits, 50, 14)
-
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
- vxge_bVALn(bits, 0, 17)
-
-#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
- vxge_bVALn(bits, 3, 5)
-
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
- vxge_bVALn(bits, 17, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
-
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
-
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
- (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
-#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
- vxge_bVALn(val, 61, 3)
-
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
- vxge_bVALn(bits, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
- vxge_bVALn(bits, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
- vxge_bVALn(bits, 33, 15)
-
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
- vxge_vBIT(val, 49, 15)
-
-#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
-#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
-#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
-
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
-
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
-
-#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
- vxge_mBIT(54)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
- vxge_bVALn(bits, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
- vxge_vBIT(val, 55, 5)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
- vxge_bVALn(bits, 62, 2)
-#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
-
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
- vxge_bVALn(bits, 7, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
- vxge_bVALn(bits, 8, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
- vxge_bVALn(bits, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
- vxge_bVALn(bits, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
- vxge_bVALn(bits, 15, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
- vxge_bVALn(bits, 19, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
- vxge_bVALn(bits, 23, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
- vxge_bVALn(bits, 27, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 31, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
- vxge_bVALn(bits, 35, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
- vxge_bVALn(bits, 39, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
- vxge_bVALn(bits, 43, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
- vxge_vBIT(val, 32, 32)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
- vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
- vxge_bVALn(bits, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
- vxge_bVALn(bits, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
- vxge_vBIT(val, 36, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
- vxge_bVALn(bits, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
- vxge_bVALn(bits, 42, 2)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
- vxge_vBIT(val, 42, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
- vxge_bVALn(bits, 0, 64)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
- vxge_bVALn(bits, 3, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
- vxge_bVALn(bits, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
- vxge_vBIT(val, 0, 48)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
- vxge_vBIT(val, 62, 2)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
- vxge_bVALn(bits, 8, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
- vxge_vBIT(val, 9, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
- vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
- vxge_bVALn(bits, 24, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
- vxge_vBIT(val, 25, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
- vxge_bVALn(bits, 40, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
- vxge_vBIT(val, 41, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
- vxge_bVALn(bits, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
- vxge_bVALn(bits, 56, 1)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
- vxge_bVALn(bits, 57, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
- vxge_vBIT(val, 57, 7)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
- vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
- vxge_vBIT(val, 16, 16)
-
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
- vxge_bVALn(bits, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
- vxge_bVALn(bits, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) vxge_vBIT(val, 48, 16)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
-
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
- vxge_bVALn(bits, 0, 18)
-
-#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
- vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
-#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
-) vxge_bVALn(bits, 48, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
- vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
-) vxge_bVALn(bits, 32, 32)
-#define \
-VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
- vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
- vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
-#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
-
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
- vxge_bVALn(bits, 16, 8)
-
-#define VXGE_HW_CONFIG_PRIV_H
-
-#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
-#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
-#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
-
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
-
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
-
-/*
- * The registers are memory mapped and use native big-endian byte order.
- * Little-endian hosts are handled by enabling hardware byte-swapping for
- * register and DMA operations.
- */
-struct vxge_hw_legacy_reg {
-
- u8 unused00010[0x00010];
-
-/*0x00010*/ u64 toc_swapper_fb;
-#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00018*/ u64 pifm_rd_swap_en;
-#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00020*/ u64 pifm_rd_flip_en;
-#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00028*/ u64 pifm_wr_swap_en;
-#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00030*/ u64 pifm_wr_flip_en;
-#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
-/*0x00038*/ u64 toc_first_pointer;
-#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00040*/ u64 host_access_en;
-#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
-
-} __packed;
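On a little-endian host the HAL is expected to program the swapper registers above before any other register access; a rough sketch of one branch of that sequence (the bar0 mapping is an assumption, and this is not the driver's exact code):

	struct vxge_hw_legacy_reg __iomem *legacy = bar0;	/* assumed mapping of the legacy register block */

	if (readq(&legacy->toc_swapper_fb) == VXGE_HW_SWAPPER_BYTE_SWAPPED) {
		/* host reads the fixed pattern byte-swapped: enable HW swapping;
		 * the all-ones enable values are unaffected by the swap itself
		 */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, &legacy->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, &legacy->pifm_wr_swap_en);
	}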
-
-struct vxge_hw_toc_reg {
-
- u8 unused00050[0x00050];
-
-/*0x00050*/ u64 toc_common_pointer;
-#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 toc_memrepair_pointer;
-#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
-#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused001e0[0x001e0-0x000e8];
-
-/*0x001e0*/ u64 toc_mrpcim_pointer;
-#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
-/*0x001e8*/ u64 toc_srpcim_pointer[17];
-#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00278[0x00278-0x00270];
-
-/*0x00278*/ u64 toc_vpmgmt_pointer[17];
-#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused00390[0x00390-0x00300];
-
-/*0x00390*/ u64 toc_vpath_pointer[17];
-#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
- u8 unused004a0[0x004a0-0x00418];
-
-/*0x004a0*/ u64 toc_kdfc;
-#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004a8*/ u64 toc_usdc;
-#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
-#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
-/*0x004b0*/ u64 toc_kdfc_vpath_stride;
-#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-/*0x004b8*/ u64 toc_kdfc_fifo_stride;
-#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
- vxge_vBIT(val, 0, 64)
-
-} __packed;
-
-struct vxge_hw_common_reg {
-
- u8 unused00a00[0x00a00];
-
-/*0x00a00*/ u64 prc_status1;
-#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
-/*0x00a08*/ u64 rxdcm_reset_in_progress;
-#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a10*/ u64 replicq_flush_in_progress;
-#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
-#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
-#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
-/*0x00a28*/ u64 noffload_reset_in_progress;
-#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rd_req_in_progress;
-#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
-/*0x00a38*/ u64 rd_req_outstanding;
-#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
-/*0x00a40*/ u64 kdfc_reset_in_progress;
-#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
- u8 unused00b00[0x00b00-0x00a48];
-
-/*0x00b00*/ u64 one_cfg_vp;
-#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
-/*0x00b08*/ u64 one_common;
-#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
- u8 unused00b80[0x00b80-0x00b10];
-
-/*0x00b80*/ u64 tim_int_en;
-#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
-/*0x00b88*/ u64 tim_set_int_en;
-#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b90*/ u64 tim_clr_int_en;
-#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
-/*0x00b98*/ u64 tim_mask_int_during_reset;
-#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
-/*0x00ba0*/ u64 tim_reset_in_progress;
-#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
-/*0x00ba8*/ u64 tim_outstanding_bmap;
-#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
- u8 unused00c00[0x00c00-0x00bb0];
-
-/*0x00c00*/ u64 msg_reset_in_progress;
-#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
-/*0x00c08*/ u64 msg_mxp_mr_ready;
-#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
-/*0x00c10*/ u64 msg_uxp_mr_ready;
-#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
-/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
-#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
-/*0x00c20*/ u64 msg_umq_rtl_bwr;
-#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
- u8 unused00d00[0x00d00-0x00c28];
-
-/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
-#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
-/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
-#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
-/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
-#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
-/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
-#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
-/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
-#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
- u8 unused00d40[0x00d40-0x00d28];
-
-/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
-#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
-/*0x00d48*/ u64 stats_cfg0;
-#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
- u8 unused00da8[0x00da8-0x00d50];
-
-/*0x00da8*/ u64 clear_msix_mask_vect[4];
-#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00dc8*/ u64 set_msix_mask_vect[4];
-#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
-/*0x00de8*/ u64 clear_msix_mask_all_vect;
-#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df0*/ u64 set_msix_mask_all_vect;
-#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00df8*/ u64 mask_vector[4];
-#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x00e18*/ u64 msix_pending_vector[4];
-#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
-#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
- vxge_vBIT(val, 0, 17)
-/*0x00e58*/ u64 titan_asic_id;
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
-/*0x00e60*/ u64 titan_general_int_status;
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
-#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
- vxge_vBIT(val, 3, 17)
- u8 unused00e70[0x00e70-0x00e68];
-
-/*0x00e70*/ u64 titan_mask_all_int;
-#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
-#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
- u8 unused00e80[0x00e80-0x00e78];
-
-/*0x00e80*/ u64 tim_int_status0;
-#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
-/*0x00e88*/ u64 tim_int_mask0;
-#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
-/*0x00e90*/ u64 tim_int_status1;
-#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
-/*0x00e98*/ u64 tim_int_mask1;
-#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
-/*0x00ea0*/ u64 rti_int_status;
-#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
-/*0x00ea8*/ u64 rti_int_mask;
-#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
-/*0x00eb0*/ u64 adapter_status;
-#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
-#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
-#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
-#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
-#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
-#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
-#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
-#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
-#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
-#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
-#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
-/*0x00eb8*/ u64 gen_ctrl;
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
-#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
-#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
-#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
-#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
-#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
- u8 unused00ed0[0x00ed0-0x00ec0];
-
-/*0x00ed0*/ u64 adapter_ready;
-#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
-/*0x00ed8*/ u64 outstanding_read;
-#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
-/*0x00ee0*/ u64 vpath_rst_in_prog;
-#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
-/*0x00ee8*/ u64 vpath_reg_modified;
-#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
- u8 unused00fc0[0x00fc0-0x00ef0];
-
-/*0x00fc0*/ u64 cp_reset_in_progress;
-#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
- u8 unused01080[0x01080-0x00fc8];
-
-/*0x01080*/ u64 xgmac_ready;
-#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused010c0[0x010c0-0x01088];
-
-/*0x010c0*/ u64 fbif_ready;
-#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
- u8 unused01100[0x01100-0x010c8];
-
-/*0x01100*/ u64 vplane_assignments;
-#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
-/*0x01108*/ u64 vpath_assignments;
-#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
-/*0x01110*/ u64 resource_assignments;
-#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01118*/ u64 host_type_assignments;
-#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
- vxge_vBIT(val, 5, 3)
- u8 unused01128[0x01128-0x01120];
-
-/*0x01128*/ u64 max_resource_assignments;
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
- vxge_vBIT(val, 11, 5)
-/*0x01130*/ u64 pf_vpath_assignments;
-#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01200[0x01200-0x01138];
-
-/*0x01200*/ u64 rts_access_icmp;
-#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01208*/ u64 rts_access_tcpsyn;
-#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01210*/ u64 rts_access_zl4pyld;
-#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01218*/ u64 rts_access_l4prtcl_tcp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01220*/ u64 rts_access_l4prtcl_udp;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01228*/ u64 rts_access_l4prtcl_flex;
-#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
-/*0x01230*/ u64 rts_access_ipfrag;
-#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
-
-} __packed;
-
-struct vxge_hw_memrepair_reg {
- u64 unused1;
- u64 unused2;
-} __packed;
-
-struct vxge_hw_pcicfgmgmt_reg {
-
-/*0x00000*/ u64 resource_no;
-#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
-/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
-#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
- vxge_vBIT(val, 2, 6)
-/*0x00020*/ u64 msixgrp_no;
-#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
-
-} __packed;
-
-struct vxge_hw_mrpcim_reg {
-/*0x00000*/ u64 g3fbct_int_status;
-#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x00008*/ u64 g3fbct_int_mask;
-/*0x00010*/ u64 g3fbct_err_reg;
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x00018*/ u64 g3fbct_err_mask;
-/*0x00020*/ u64 g3fbct_err_alarm;
-
- u8 unused00a00[0x00a00-0x00028];
-
-/*0x00a00*/ u64 wrdma_int_status;
-#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
-#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
-#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
-#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
-#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
-#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
-/*0x00a08*/ u64 wrdma_int_mask;
-/*0x00a10*/ u64 rc_alarm_reg;
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
-#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
-#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
-#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
-#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
-/*0x00a18*/ u64 rc_alarm_mask;
-/*0x00a20*/ u64 rc_alarm_alarm;
-/*0x00a28*/ u64 rxdrm_sm_err_reg;
-#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a30*/ u64 rxdrm_sm_err_mask;
-/*0x00a38*/ u64 rxdrm_sm_err_alarm;
-/*0x00a40*/ u64 rxdcm_sm_err_reg;
-#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a48*/ u64 rxdcm_sm_err_mask;
-/*0x00a50*/ u64 rxdcm_sm_err_alarm;
-/*0x00a58*/ u64 rxdwm_sm_err_reg;
-#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
-/*0x00a60*/ u64 rxdwm_sm_err_mask;
-/*0x00a68*/ u64 rxdwm_sm_err_alarm;
-/*0x00a70*/ u64 rda_err_reg;
-#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
-#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
-#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
-#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
-#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
-#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
-#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
-/*0x00a78*/ u64 rda_err_mask;
-/*0x00a80*/ u64 rda_err_alarm;
-/*0x00a88*/ u64 rda_ecc_db_reg;
-#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00a90*/ u64 rda_ecc_db_mask;
-/*0x00a98*/ u64 rda_ecc_db_alarm;
-/*0x00aa0*/ u64 rda_ecc_sg_reg;
-#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
-/*0x00aa8*/ u64 rda_ecc_sg_mask;
-/*0x00ab0*/ u64 rda_ecc_sg_alarm;
-/*0x00ab8*/ u64 rqa_err_reg;
-#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
-/*0x00ac0*/ u64 rqa_err_mask;
-/*0x00ac8*/ u64 rqa_err_alarm;
-/*0x00ad0*/ u64 frf_alarm_reg;
-#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
-/*0x00ad8*/ u64 frf_alarm_mask;
-/*0x00ae0*/ u64 frf_alarm_alarm;
-/*0x00ae8*/ u64 rocrc_alarm_reg;
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
-#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
-#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
-#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
-/*0x00af0*/ u64 rocrc_alarm_mask;
-/*0x00af8*/ u64 rocrc_alarm_alarm;
-/*0x00b00*/ u64 wde0_alarm_reg;
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b08*/ u64 wde0_alarm_mask;
-/*0x00b10*/ u64 wde0_alarm_alarm;
-/*0x00b18*/ u64 wde1_alarm_reg;
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b20*/ u64 wde1_alarm_mask;
-/*0x00b28*/ u64 wde1_alarm_alarm;
-/*0x00b30*/ u64 wde2_alarm_reg;
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b38*/ u64 wde2_alarm_mask;
-/*0x00b40*/ u64 wde2_alarm_alarm;
-/*0x00b48*/ u64 wde3_alarm_reg;
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
-#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
-/*0x00b50*/ u64 wde3_alarm_mask;
-/*0x00b58*/ u64 wde3_alarm_alarm;
-
- u8 unused00be8[0x00be8-0x00b60];
-
-/*0x00be8*/ u64 rx_w_round_robin_0;
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
-/*0x00bf0*/ u64 rx_w_round_robin_1;
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00bf8*/ u64 rx_w_round_robin_2;
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c00*/ u64 rx_w_round_robin_3;
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c08*/ u64 rx_w_round_robin_4;
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c10*/ u64 rx_w_round_robin_5;
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c18*/ u64 rx_w_round_robin_6;
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c20*/ u64 rx_w_round_robin_7;
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c28*/ u64 rx_w_round_robin_8;
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c30*/ u64 rx_w_round_robin_9;
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c38*/ u64 rx_w_round_robin_10;
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c40*/ u64 rx_w_round_robin_11;
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c48*/ u64 rx_w_round_robin_12;
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c50*/ u64 rx_w_round_robin_13;
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c58*/ u64 rx_w_round_robin_14;
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c60*/ u64 rx_w_round_robin_15;
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c68*/ u64 rx_w_round_robin_16;
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c70*/ u64 rx_w_round_robin_17;
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c78*/ u64 rx_w_round_robin_18;
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c80*/ u64 rx_w_round_robin_19;
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c88*/ u64 rx_w_round_robin_20;
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
- vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
- vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
- vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00c90*/ u64 rx_w_round_robin_21;
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
- vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
- vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
- vxge_vBIT(val, 19, 5)
-
-#define VXGE_HW_WRR_RING_SERVICE_STATES 171
-#define VXGE_HW_WRR_RING_COUNT 22
-
-/*0x00c98*/ u64 rx_queue_priority_0;
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-/*0x00ca0*/ u64 rx_queue_priority_1;
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
-/*0x00ca8*/ u64 rx_queue_priority_2;
-#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
- u8 unused00cc8[0x00cc8-0x00cb0];
-
-/*0x00cc8*/ u64 replication_queue_priority;
-#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00cd0*/ u64 rx_queue_select;
-#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
-#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
-/*0x00cd8*/ u64 rqa_vpbp_ctrl;
-#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
-#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
-#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
-/*0x00ce0*/ u64 rx_multi_cast_ctrl;
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
- vxge_vBIT(val, 2, 30)
-#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
-/*0x00ce8*/ u64 wde_prm_ctrl;
-#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
-#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
-#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
-/*0x00cf0*/ u64 noa_ctrl;
-#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
-#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
-/*0x00cf8*/ u64 phase_cfg;
-#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
-#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
-#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
-#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
-#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
-#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
-#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
-#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
-#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
-#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
-/*0x00d00*/ u64 rcq_bypq_cfg;
-#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
-#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
- u8 unused00e00[0x00e00-0x00d08];
-
-/*0x00e00*/ u64 doorbell_int_status;
-#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
-#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
-/*0x00e08*/ u64 doorbell_int_mask;
-/*0x00e10*/ u64 kdfc_err_reg;
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
-/*0x00e18*/ u64 kdfc_err_mask;
-/*0x00e20*/ u64 kdfc_err_reg_alarm;
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
-#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
- u8 unused00e40[0x00e40-0x00e28];
-/*0x00e40*/ u64 kdfc_vp_partition_0;
-#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
-/*0x00e48*/ u64 kdfc_vp_partition_1;
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
-/*0x00e50*/ u64 kdfc_vp_partition_2;
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
-/*0x00e58*/ u64 kdfc_vp_partition_3;
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
-/*0x00e60*/ u64 kdfc_vp_partition_4;
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
-/*0x00e68*/ u64 kdfc_vp_partition_5;
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
-/*0x00e70*/ u64 kdfc_vp_partition_6;
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
-/*0x00e78*/ u64 kdfc_vp_partition_7;
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
-/*0x00e80*/ u64 kdfc_vp_partition_8;
-#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
-/*0x00e88*/ u64 kdfc_w_round_robin_0;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused0f28[0x0f28-0x0e90];
-
-/*0x00f28*/ u64 kdfc_w_round_robin_20;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
-#define VXGE_HW_WRR_FIFO_COUNT 20
-
- u8 unused0fc8[0x0fc8-0x0f30];
-
-/*0x00fc8*/ u64 kdfc_w_round_robin_40;
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
-#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
-
- u8 unused1068[0x01068-0x0fd0];
-
-/*0x01068*/ u64 kdfc_entry_type_sel_0;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
-/*0x01070*/ u64 kdfc_entry_type_sel_1;
-#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 kdfc_fifo_0_ctrl;
-#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
-#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
-
- u8 unused1100[0x01100-0x1080];
-
-/*0x01100*/ u64 kdfc_fifo_17_ctrl;
-#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
-
- u8 unused1600[0x01600-0x1108];
-
-/*0x01600*/ u64 rxmac_int_status;
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
-#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
- vxge_mBIT(11)
-/*0x01608*/ u64 rxmac_int_mask;
- u8 unused01618[0x01618-0x01610];
-
-/*0x01618*/ u64 rxmac_gen_err_reg;
-/*0x01620*/ u64 rxmac_gen_err_mask;
-/*0x01628*/ u64 rxmac_gen_err_alarm;
-/*0x01630*/ u64 rxmac_ecc_err_reg;
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
- vxge_vBIT(val, 24, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
- vxge_vBIT(val, 26, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
- vxge_vBIT(val, 28, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
- vxge_vBIT(val, 30, 2)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
- vxge_vBIT(val, 40, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
- vxge_vBIT(val, 47, 7)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
- vxge_vBIT(val, 54, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
- vxge_vBIT(val, 57, 3)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
- vxge_mBIT(60)
-#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
- vxge_mBIT(61)
-/*0x01638*/ u64 rxmac_ecc_err_mask;
-/*0x01640*/ u64 rxmac_ecc_err_alarm;
-/*0x01648*/ u64 rxmac_various_err_reg;
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
-#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
-/*0x01650*/ u64 rxmac_various_err_mask;
-/*0x01658*/ u64 rxmac_various_err_alarm;
-/*0x01660*/ u64 rxmac_gen_cfg;
-#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
-/*0x01668*/ u64 rxmac_authorize_all_addr;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
-/*0x01670*/ u64 rxmac_authorize_all_vid;
-#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
- u8 unused016c0[0x016c0-0x01678];
-
-/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
- u8 unused016e0[0x016e0-0x016c8];
-
-/*0x016e0*/ u64 rxmac_cfg0_port[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
- u8 unused01710[0x01710-0x016f8];
-
-/*0x01710*/ u64 rxmac_cfg2_port[3];
-#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
-/*0x01728*/ u64 rxmac_pause_cfg_port[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
- u8 unused01758[0x01758-0x01740];
-
-/*0x01758*/ u64 rxmac_red_cfg0_port[3];
-#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
-/*0x01770*/ u64 rxmac_red_cfg1_port[3];
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
-/*0x01788*/ u64 rxmac_red_cfg2_port[3];
-#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
-/*0x017a0*/ u64 rxmac_link_util_port[3];
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
- u8 unused017d0[0x017d0-0x017b8];
-
-/*0x017d0*/ u64 rxmac_status_port[3];
-#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
- u8 unused01800[0x01800-0x017e8];
-
-/*0x01800*/ u64 rxmac_rx_pa_cfg0;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x01808*/ u64 rxmac_rx_pa_cfg1;
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
- u8 unused01828[0x01828-0x01810];
-
-/*0x01828*/ u64 rts_mgr_cfg0;
-#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
-/*0x01830*/ u64 rts_mgr_cfg1;
-#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
-/*0x01838*/ u64 rts_mgr_criteria_priority;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
-/*0x01840*/ u64 rts_mgr_da_pause_cfg;
-#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
-/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
-#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused01890[0x01890-0x01850];
-/*0x01890*/ u64 rts_mgr_cbasin_cfg;
- u8 unused01968[0x01968-0x01898];
-
-/*0x01968*/ u64 dbg_stat_rx_any_frms;
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused01a00[0x01a00-0x01970];
-
-/*0x01a00*/ u64 rxmac_red_rate_vp[17];
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
- u8 unused01e00[0x01e00-0x01a88];
-
-/*0x01e00*/ u64 xgmac_int_status;
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
- vxge_mBIT(7)
-#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
-#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
-/*0x01e08*/ u64 xgmac_int_mask;
-/*0x01e10*/ u64 xmac_gen_err_reg;
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
- vxge_mBIT(11)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
- vxge_mBIT(23)
-#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
- vxge_vBIT(val, 40, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
- vxge_vBIT(val, 42, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
- vxge_vBIT(val, 44, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
- vxge_vBIT(val, 46, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
- vxge_vBIT(val, 48, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
- vxge_vBIT(val, 50, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
- vxge_vBIT(val, 52, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
- vxge_vBIT(val, 54, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
- vxge_vBIT(val, 56, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
- vxge_vBIT(val, 58, 2)
-#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
-/*0x01e18*/ u64 xmac_gen_err_mask;
-/*0x01e20*/ u64 xmac_gen_err_alarm;
-/*0x01e28*/ u64 xmac_link_err_port0_reg;
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
-#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
- vxge_mBIT(47)
-/*0x01e30*/ u64 xmac_link_err_port0_mask;
-/*0x01e38*/ u64 xmac_link_err_port0_alarm;
-/*0x01e40*/ u64 xmac_link_err_port1_reg;
-/*0x01e48*/ u64 xmac_link_err_port1_mask;
-/*0x01e50*/ u64 xmac_link_err_port1_alarm;
-/*0x01e58*/ u64 xgxs_gen_err_reg;
-#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
-/*0x01e60*/ u64 xgxs_gen_err_mask;
-/*0x01e68*/ u64 xgxs_gen_err_alarm;
-/*0x01e70*/ u64 asic_ntwk_err_reg;
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x01e78*/ u64 asic_ntwk_err_mask;
-/*0x01e80*/ u64 asic_ntwk_err_alarm;
-/*0x01e88*/ u64 asic_gpio_err_reg;
-#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
-/*0x01e90*/ u64 asic_gpio_err_mask;
-/*0x01e98*/ u64 asic_gpio_err_alarm;
-/*0x01ea0*/ u64 xgmac_gen_status;
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
-/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
-#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
-#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
- vxge_vBIT(val, 0, 17)
-/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
-#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
- u8 unused01f40[0x01f40-0x01ed0];
-
-/*0x01f40*/ u64 xmac_gen_cfg;
-#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
-/*0x01f48*/ u64 xmac_timestamp;
-#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
-/*0x01f50*/ u64 xmac_stats_gen_cfg;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
-/*0x01f58*/ u64 xmac_stats_sys_cmd;
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
-#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x01f60*/ u64 xmac_stats_sys_data;
-#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused01f80[0x01f80-0x01f68];
-
-/*0x01f80*/ u64 asic_ntwk_ctrl;
-#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
-#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
-/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
-#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
-/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
-#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
-/*0x01f98*/ u64 xmac_cfg_port[3];
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
-/*0x01fb0*/ u64 xmac_station_addr_port[2];
-#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
- u8 unused02020[0x02020-0x01fc0];
-
-/*0x02020*/ u64 lag_cfg;
-#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
-#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
-#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
-/*0x02028*/ u64 lag_status;
-#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
-#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
- vxge_vBIT(val, 8, 8)
-/*0x02030*/ u64 lag_active_passive_cfg;
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
-#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
- vxge_vBIT(val, 32, 16)
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 lag_lacp_cfg;
-#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
-#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
-#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
-/*0x02048*/ u64 lag_timer_cfg_1;
-#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
-/*0x02050*/ u64 lag_timer_cfg_2;
-#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
-/*0x02058*/ u64 lag_sys_id;
-#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
-/*0x02060*/ u64 lag_sys_cfg;
-#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
- u8 unused02070[0x02070-0x02068];
-
-/*0x02070*/ u64 lag_aggr_addr_cfg[2];
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
-#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
-/*0x02080*/ u64 lag_aggr_id_cfg[2];
-#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
-/*0x02090*/ u64 lag_aggr_admin_key[2];
-#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020a0*/ u64 lag_aggr_alt_admin_key;
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
-/*0x020a8*/ u64 lag_aggr_oper_key[2];
-#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x020c8*/ u64 lag_aggr_partner_info[2];
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
- vxge_vBIT(val, 16, 16)
-/*0x020d8*/ u64 lag_aggr_state[2];
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
-#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
- u8 unused020f0[0x020f0-0x020e8];
-
-/*0x020f0*/ u64 lag_port_cfg[2];
-#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
-/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
-/*0x02110*/ u64 lag_port_actor_admin_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
-/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x02140*/ u64 lag_port_partner_admin_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
-/*0x02150*/ u64 lag_port_to_aggr[2];
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
-/*0x02160*/ u64 lag_port_actor_oper_key[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
-/*0x02170*/ u64 lag_port_actor_oper_state[2];
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
- vxge_vBIT(val, 0, 48)
-/*0x02190*/ u64 lag_port_partner_oper_info[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
- vxge_vBIT(val, 48, 16)
-/*0x021a0*/ u64 lag_port_partner_oper_state[2];
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
- vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
-/*0x021b0*/ u64 lag_port_state_vars[2];
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
- vxge_mBIT(32)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
- vxge_mBIT(33)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
- vxge_vBIT(val, 41, 3)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
- vxge_vBIT(val, 60, 4)
-/*0x021c0*/ u64 lag_port_timer_cntr[2];
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 32, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
- vxge_vBIT(val, 40, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
- vxge_vBIT(val, 56, 8)
- u8 unused02208[0x02700-0x021d0];
-
-/*0x02700*/ u64 rtdma_int_status;
-#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
-#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
-#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
-#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
-/*0x02708*/ u64 rtdma_int_mask;
-/*0x02710*/ u64 pda_alarm_reg;
-#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
-/*0x02718*/ u64 pda_alarm_mask;
-/*0x02720*/ u64 pda_alarm_alarm;
-/*0x02728*/ u64 pcc_error_reg;
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
-/*0x02730*/ u64 pcc_error_mask;
-/*0x02738*/ u64 pcc_error_alarm;
-/*0x02740*/ u64 lso_error_reg;
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
-#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
-/*0x02748*/ u64 lso_error_mask;
-/*0x02750*/ u64 lso_error_alarm;
-/*0x02758*/ u64 sm_error_reg;
-#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
-/*0x02760*/ u64 sm_error_mask;
-/*0x02768*/ u64 sm_error_alarm;
-
- u8 unused027a8[0x027a8-0x02770];
-
-/*0x027a8*/ u64 txd_ownership_ctrl;
-#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
-/*0x027b0*/ u64 pcc_cfg;
-#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
-#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
-/*0x027b8*/ u64 pcc_control;
-#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
-#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
-/*0x027c0*/ u64 pda_status1;
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
-#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
-/*0x027c8*/ u64 rtdma_bw_timer;
-#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
-
- u8 unused02900[0x02900-0x027d0];
-/*0x02900*/ u64 g3cmct_int_status;
-#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x02908*/ u64 g3cmct_int_mask;
-/*0x02910*/ u64 g3cmct_err_reg;
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
-#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
-/*0x02918*/ u64 g3cmct_err_mask;
-/*0x02920*/ u64 g3cmct_err_alarm;
- u8 unused03000[0x03000-0x02928];
-
-/*0x03000*/ u64 mc_int_status;
-#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
-#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
-#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
-#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
-/*0x03008*/ u64 mc_int_mask;
-/*0x03010*/ u64 mc_err_reg;
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
-#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
-#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
-#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
-#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
-/*0x03018*/ u64 mc_err_mask;
-/*0x03020*/ u64 mc_err_alarm;
-/*0x03028*/ u64 grocrc_alarm_reg;
-#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
-/*0x03030*/ u64 grocrc_alarm_mask;
-/*0x03038*/ u64 grocrc_alarm_alarm;
- u8 unused03100[0x03100-0x03040];
-
-/*0x03100*/ u64 rx_thresh_cfg_repl;
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
-#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
-#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
- u8 unused033b8[0x033b8-0x03108];
-
-/*0x033b8*/ u64 fbmc_ecc_cfg;
-#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
- u8 unused03400[0x03400-0x033c0];
-
-/*0x03400*/ u64 pcipif_int_status;
-#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
-#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
-#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
-#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
-#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
- vxge_mBIT(19)
-/*0x03408*/ u64 pcipif_int_mask;
-/*0x03410*/ u64 dbecc_err_reg;
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
-#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
-#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
-#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
-/*0x03418*/ u64 dbecc_err_mask;
-/*0x03420*/ u64 dbecc_err_alarm;
-/*0x03428*/ u64 sbecc_err_reg;
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
-/*0x03430*/ u64 sbecc_err_mask;
-/*0x03438*/ u64 sbecc_err_alarm;
-/*0x03440*/ u64 general_err_reg;
-#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
-#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
-/*0x03448*/ u64 general_err_mask;
-/*0x03450*/ u64 general_err_alarm;
-/*0x03458*/ u64 srpcim_msg_reg;
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
- vxge_mBIT(0)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
- vxge_mBIT(1)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
- vxge_mBIT(2)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
- vxge_mBIT(3)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
- vxge_mBIT(4)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
- vxge_mBIT(5)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
- vxge_mBIT(6)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
- vxge_mBIT(7)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
- vxge_mBIT(8)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
- vxge_mBIT(9)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
- vxge_mBIT(10)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
- vxge_mBIT(11)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
- vxge_mBIT(12)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
- vxge_mBIT(13)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
- vxge_mBIT(14)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
- vxge_mBIT(15)
-#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
- vxge_mBIT(16)
-/*0x03460*/ u64 srpcim_msg_mask;
-/*0x03468*/ u64 srpcim_msg_alarm;
- u8 unused03600[0x03600-0x03470];
-
-/*0x03600*/ u64 gcmg1_int_status;
-#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
-#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
-/*0x03608*/ u64 gcmg1_int_mask;
- u8 unused03a00[0x03a00-0x03610];
-
-/*0x03a00*/ u64 pcmg1_int_status;
-#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
-#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
-/*0x03a08*/ u64 pcmg1_int_mask;
- u8 unused04000[0x04000-0x03a10];
-
-/*0x04000*/ u64 one_int_status;
-#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
- vxge_mBIT(13)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
- vxge_mBIT(14)
-#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
-#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
-#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
-#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
-#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
-#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
-/*0x04008*/ u64 one_int_mask;
- u8 unused04818[0x04818-0x04010];
-
-/*0x04818*/ u64 noa_wct_ctrl;
-#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
-/*0x04820*/ u64 rc_cfg2;
-#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
-/*0x04828*/ u64 rc_cfg3;
-#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
-/*0x04830*/ u64 rx_multi_cast_ctrl1;
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
-/*0x04838*/ u64 rxdm_dbg_rd;
-#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
-/*0x04840*/ u64 rxdm_dbg_rd_data;
-#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x04848*/ u64 rqa_top_prty_for_vh[17];
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused04900[0x04900-0x048d0];
-
-/*0x04900*/ u64 tim_status;
-#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
-/*0x04908*/ u64 tim_ecc_enable;
-#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
-#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
-/*0x04910*/ u64 tim_bp_ctrl;
-#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
-#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
-#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
-/*0x04918*/ u64 tim_resource_assignment_vh[17];
-#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
-#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
- u8 unused04b00[0x04b00-0x04a28];
-
-/*0x04b00*/ u64 gcmg2_int_status;
-#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
-#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
-/*0x04b08*/ u64 gcmg2_int_mask;
-/*0x04b10*/ u64 gxtmc_err_reg;
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
- vxge_mBIT(21)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
- vxge_mBIT(22)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
- vxge_mBIT(24)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
- vxge_mBIT(25)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
-#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
-/*0x04b18*/ u64 gxtmc_err_mask;
-/*0x04b20*/ u64 gxtmc_err_alarm;
-/*0x04b28*/ u64 cmc_err_reg;
-#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
-/*0x04b30*/ u64 cmc_err_mask;
-/*0x04b38*/ u64 cmc_err_alarm;
-/*0x04b40*/ u64 gcp_err_reg;
-#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
-#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
-#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
-/*0x04b48*/ u64 gcp_err_mask;
-/*0x04b50*/ u64 gcp_err_alarm;
- u8 unused04f00[0x04f00-0x04b58];
-
-/*0x04f00*/ u64 pcmg2_int_status;
-#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
-#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
-/*0x04f08*/ u64 pcmg2_int_mask;
-/*0x04f10*/ u64 pxtmc_err_reg;
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
-#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
-/*0x04f18*/ u64 pxtmc_err_mask;
-/*0x04f20*/ u64 pxtmc_err_alarm;
-/*0x04f28*/ u64 cp_err_reg;
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
-#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
-#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
-#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
-#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
-#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
-#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
-#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
-#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
-#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
-#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
-#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
-#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
-/*0x04f30*/ u64 cp_err_mask;
-/*0x04f38*/ u64 cp_err_alarm;
- u8 unused04fe8[0x04f50-0x04f40];
-
-/*0x04f50*/ u64 cp_exc_reg;
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
-#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
-/*0x04f58*/ u64 cp_exc_mask;
-/*0x04f60*/ u64 cp_exc_alarm;
-/*0x04f68*/ u64 cp_exc_cause;
-#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
- u8 unused05200[0x05200-0x04f70];
-
-/*0x05200*/ u64 msg_int_status;
-#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
-#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
-#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
-/*0x05208*/ u64 msg_int_mask;
-/*0x05210*/ u64 tim_err_reg;
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
-#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
-#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
-#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
-#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
-#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
-/*0x05218*/ u64 tim_err_mask;
-/*0x05220*/ u64 tim_err_alarm;
-/*0x05228*/ u64 msg_err_reg;
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
- vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
-#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
-#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
-/*0x05230*/ u64 msg_err_mask;
-/*0x05238*/ u64 msg_err_alarm;
- u8 unused05340[0x05340-0x05240];
-
-/*0x05340*/ u64 msg_exc_reg;
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
-#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
-#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
-/*0x05348*/ u64 msg_exc_mask;
-/*0x05350*/ u64 msg_exc_alarm;
-/*0x05358*/ u64 msg_exc_cause;
-#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
- u8 unused05368[0x05380-0x05360];
-
-/*0x05380*/ u64 msg_err2_reg;
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
- vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
- vxge_mBIT(12)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
- vxge_mBIT(13)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
- vxge_mBIT(14)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
- vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
- vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
- vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
- vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
- vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
- vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
- vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
- vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
- vxge_mBIT(26)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
- vxge_mBIT(28)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(30)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(31)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
- vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
-#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
-/*0x05388*/ u64 msg_err2_mask;
-/*0x05390*/ u64 msg_err2_alarm;
-/*0x05398*/ u64 msg_err3_reg;
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
-#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
-#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
-/*0x053a0*/ u64 msg_err3_mask;
-/*0x053a8*/ u64 msg_err3_alarm;
- u8 unused05600[0x05600-0x053b0];
-
-/*0x05600*/ u64 fau_gen_err_reg;
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
-#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
-#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
-/*0x05608*/ u64 fau_gen_err_mask;
-/*0x05610*/ u64 fau_gen_err_alarm;
-/*0x05618*/ u64 fau_ecc_err_reg;
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 4, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 8, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 10, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
- vxge_vBIT(val, 14, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
- vxge_vBIT(val, 16, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
- vxge_vBIT(val, 18, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
- vxge_vBIT(val, 20, 2)
-#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
-/*0x05620*/ u64 fau_ecc_err_mask;
-/*0x05628*/ u64 fau_ecc_err_alarm;
- u8 unused05658[0x05658-0x05630];
-/*0x05658*/ u64 fau_pa_cfg;
-#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
- u8 unused05668[0x05668-0x05660];
-
-/*0x05668*/ u64 dbg_stats_fau_rx_path;
-#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused056c0[0x056c0-0x05670];
-
-/*0x056c0*/ u64 fau_lag_cfg;
-#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
- u8 unused05800[0x05800-0x056c8];
-
-/*0x05800*/ u64 tpa_int_status;
-#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
-#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
-#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
-/*0x05808*/ u64 tpa_int_mask;
-/*0x05810*/ u64 orp_err_reg;
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
-#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
-#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
-#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
-/*0x05818*/ u64 orp_err_mask;
-/*0x05820*/ u64 orp_err_alarm;
-/*0x05828*/ u64 ptm_alarm_reg;
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
-#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
-#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
-#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
-#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
-/*0x05830*/ u64 ptm_alarm_mask;
-/*0x05838*/ u64 ptm_alarm_alarm;
-/*0x05840*/ u64 tpa_error_reg;
-#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
-/*0x05848*/ u64 tpa_error_mask;
-/*0x05850*/ u64 tpa_error_alarm;
-/*0x05858*/ u64 tpa_global_cfg;
-#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
- u8 unused05868[0x05870-0x05860];
-
-/*0x05870*/ u64 ptm_ecc_cfg;
-#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
-/*0x05878*/ u64 ptm_phase_cfg;
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
-#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
- u8 unused05898[0x05898-0x05880];
-
-/*0x05898*/ u64 dbg_stats_tpa_tx_path;
-#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused05900[0x05900-0x058a0];
-
-/*0x05900*/ u64 tmac_int_status;
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
-#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
-/*0x05908*/ u64 tmac_int_mask;
-/*0x05910*/ u64 txmac_gen_err_reg;
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
-#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
-/*0x05918*/ u64 txmac_gen_err_mask;
-/*0x05920*/ u64 txmac_gen_err_alarm;
-/*0x05928*/ u64 txmac_ecc_err_reg;
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
-/*0x05930*/ u64 txmac_ecc_err_mask;
-/*0x05938*/ u64 txmac_ecc_err_alarm;
- u8 unused05978[0x05978-0x05940];
-
-/*0x05978*/ u64 dbg_stat_tx_any_frms;
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused059a0[0x059a0-0x05980];
-
-/*0x059a0*/ u64 txmac_link_util_port[3];
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
- vxge_vBIT(val, 1, 7)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
-/*0x059b8*/ u64 txmac_cfg0_port[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
-/*0x059d0*/ u64 txmac_cfg1_port[3];
-#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
-/*0x059e8*/ u64 txmac_status_port[3];
-#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
- u8 unused05a20[0x05a20-0x05a00];
-
-/*0x05a20*/ u64 lag_distrib_dest;
-#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
-/*0x05a28*/ u64 lag_marker_cfg;
-#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
-#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
-/*0x05a30*/ u64 lag_tx_cfg;
-#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
-#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
-/*0x05a38*/ u64 lag_tx_status;
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
- vxge_vBIT(val, 0, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
- vxge_vBIT(val, 8, 8)
-#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
- vxge_vBIT(val, 16, 8)
- u8 unused05d48[0x05d48-0x05a40];
-
-/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
-#define \
-VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
- vxge_vBIT(val, 0, 64)
- u8 unused06420[0x06420-0x05dd0];
-
-/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
-#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];
-
-/*0x06530*/ u64 debug_stats0;
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
-/*0x06538*/ u64 debug_stats1;
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
-/*0x06540*/ u64 debug_stats2;
-#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
-/*0x06548*/ u64 debug_stats3_vplane[17];
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
-/*0x065d0*/ u64 debug_stats4_vplane[17];
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
-
- u8 unused07000[0x07000-0x06658];
-
-/*0x07000*/ u64 mrpcim_general_int_status;
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
-/*0x07008*/ u64 mrpcim_general_int_mask;
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
-#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
-/*0x07010*/ u64 mrpcim_ppif_int_status;
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
- vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
- vxge_mBIT(32)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
- vxge_mBIT(33)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
- vxge_mBIT(34)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
- vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
- vxge_mBIT(36)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
- vxge_mBIT(37)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
- vxge_mBIT(38)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
- vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
- vxge_mBIT(40)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
- vxge_mBIT(41)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
- vxge_mBIT(42)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
- vxge_mBIT(43)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
- vxge_mBIT(44)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
- vxge_mBIT(45)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
- vxge_mBIT(46)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
- vxge_mBIT(47)
-#define \
-VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
- vxge_mBIT(55)
-/*0x07018*/ u64 mrpcim_ppif_int_mask;
- u8 unused07028[0x07028-0x07020];
-
-/*0x07028*/ u64 ini_errors_reg;
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
-#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
-#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
-#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
-#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
-#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
-#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
-#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
-#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
-#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
-/*0x07030*/ u64 ini_errors_mask;
-/*0x07038*/ u64 ini_errors_alarm;
-/*0x07040*/ u64 dma_errors_reg;
-#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
-#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
-#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
-/*0x07048*/ u64 dma_errors_mask;
-/*0x07050*/ u64 dma_errors_alarm;
-/*0x07058*/ u64 tgt_errors_reg;
-#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
-#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
-/*0x07060*/ u64 tgt_errors_mask;
-/*0x07068*/ u64 tgt_errors_alarm;
-/*0x07070*/ u64 config_errors_reg;
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
-#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
-#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
-#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
-#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
-/*0x07078*/ u64 config_errors_mask;
-/*0x07080*/ u64 config_errors_alarm;
- u8 unused07090[0x07090-0x07088];
-
-/*0x07090*/ u64 crdt_errors_reg;
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
-#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
- vxge_mBIT(47)
-/*0x07098*/ u64 crdt_errors_mask;
-/*0x070a0*/ u64 crdt_errors_alarm;
- u8 unused070b0[0x070b0-0x070a8];
-
-/*0x070b0*/ u64 mrpcim_general_errors_reg;
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
- vxge_mBIT(47)
-#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
-/*0x070b8*/ u64 mrpcim_general_errors_mask;
-/*0x070c0*/ u64 mrpcim_general_errors_alarm;
- u8 unused070d0[0x070d0-0x070c8];
-
-/*0x070d0*/ u64 pll_errors_reg;
-#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
-#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
-/*0x070d8*/ u64 pll_errors_mask;
-/*0x070e0*/ u64 pll_errors_alarm;
-/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
-/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
-/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
-#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
-/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
- u8 unused07128[0x07128-0x07118];
-
-/*0x07128*/ u64 crdt_errors_vplane_reg[17];
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
- vxge_mBIT(3)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
- vxge_mBIT(7)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
- vxge_mBIT(11)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
- vxge_mBIT(15)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
- vxge_mBIT(19)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
- vxge_mBIT(23)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
- vxge_mBIT(27)
-#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
- vxge_mBIT(31)
-/*0x07130*/ u64 crdt_errors_vplane_mask[17];
-/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
- u8 unused072f0[0x072f0-0x072c0];
-
-/*0x072f0*/ u64 mrpcim_rst_in_prog;
-#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
-/*0x072f8*/ u64 mrpcim_reg_modified;
-#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
-
- u8 unused07378[0x07378-0x07300];
-
-/*0x07378*/ u64 write_arb_pending;
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
-#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
-/*0x07380*/ u64 read_arb_pending;
-#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
-#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
-/*0x07388*/ u64 dmaif_dmadbl_pending;
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
-#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
- vxge_vBIT(val, 13, 51)
-/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
- vxge_vBIT(val, 0, 8)
-/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
-#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
- vxge_vBIT(val, 4, 12)
- u8 unused07500[0x07500-0x074a0];
-
-/*0x07500*/ u64 mrpcim_general_cfg1;
-#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
-/*0x07508*/ u64 mrpcim_general_cfg2;
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
- vxge_vBIT(val, 47, 5)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
-/*0x07510*/ u64 mrpcim_general_cfg3;
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
- vxge_vBIT(val, 36, 16)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
-#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
-/*0x07518*/ u64 mrpcim_stats_start_host_addr;
-#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
- vxge_vBIT(val, 0, 57)
-
- u8 unused07950[0x07950-0x07520];
-
-/*0x07950*/ u64 rdcrdtarb_cfg0;
-#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 26, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
- vxge_vBIT(val, 34, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
-#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
-#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
- u8 unused07be8[0x07be8-0x07958];
-
-/*0x07be8*/ u64 bf_sw_reset;
-#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x07bf0*/ u64 sw_reset_status;
-#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
-#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
- u8 unused07d30[0x07d30-0x07bf8];
-
-/*0x07d30*/ u64 mrpcim_debug_stats0;
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
-/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
-#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed0*/ u64 mrpcim_debug_stats4;
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
- vxge_vBIT(val, 32, 32)
-/*0x07ed8*/ u64 genstats_count01;
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
-/*0x07ee0*/ u64 genstats_count23;
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
-/*0x07ee8*/ u64 genstats_count4;
-#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
-/*0x07ef0*/ u64 genstats_count5;
-#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
-
- u8 unused07f08[0x07f08-0x07ef8];
-
-/*0x07f08*/ u64 genstats_cfg[6];
-#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
-#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
-#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
-/*0x07f38*/ u64 genstat_64bit_cfg;
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
-#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
- u8 unused08000[0x08000-0x07f40];
-/*0x08000*/ u64 gcmg3_int_status;
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
-#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
-#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
-#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
-/*0x08008*/ u64 gcmg3_int_mask;
- u8 unused09000[0x09000-0x8010];
-
-/*0x09000*/ u64 g3ifcmd_fb_int_status;
-#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09008*/ u64 g3ifcmd_fb_int_mask;
-/*0x09010*/ u64 g3ifcmd_fb_err_reg;
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09018*/ u64 g3ifcmd_fb_err_mask;
-/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
-
- u8 unused09400[0x09400-0x09028];
-
-/*0x09400*/ u64 g3ifcmd_cmu_int_status;
-#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
-/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
-/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
-
- u8 unused09800[0x09800-0x09428];
-
-/*0x09800*/ u64 g3ifcmd_cml_int_status;
-#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
-/*0x09808*/ u64 g3ifcmd_cml_int_mask;
-/*0x09810*/ u64 g3ifcmd_cml_err_reg;
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
-/*0x09818*/ u64 g3ifcmd_cml_err_mask;
-/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
- u8 unused09b00[0x09b00-0x09828];
-
-/*0x09b00*/ u64 vpath_to_vplane_map[17];
-#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
- vxge_vBIT(val, 3, 5)
- u8 unused09c30[0x09c30-0x09b88];
-
-/*0x09c30*/ u64 xgxs_cfg_port[2];
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
-#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
-#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
-/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
- vxge_vBIT(val, 16, 48)
-/*0x09c50*/ u64 xgxs_rxber_status_port[2];
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
- vxge_vBIT(val, 0, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
- vxge_vBIT(val, 32, 16)
-#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
- vxge_vBIT(val, 48, 16)
-/*0x09c60*/ u64 xgxs_status_port[2];
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR BIT(11)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
- vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
- vxge_vBIT(val, 32, 4)
-#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
- vxge_vBIT(val, 36, 4)
-/*0x09c70*/ u64 xgxs_pma_reset_port[2];
-#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
- u8 unused09c90[0x09c90-0x09c80];
-
-/*0x09c90*/ u64 xgxs_static_cfg_port[2];
-#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
- u8 unused09d40[0x09d40-0x09ca0];
-
-/*0x09d40*/ u64 xgxs_info_port[2];
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
-/*0x09d50*/ u64 ratemgmt_cfg_port[2];
-#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
-#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
-/*0x09d60*/ u64 ratemgmt_status_port[2];
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
- u8 unused09d80[0x09d80-0x09d70];
-
-/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
-#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
-/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
- vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
-/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
- vxge_mBIT(11)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
- vxge_mBIT(15)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
-#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
-/*0x09db0*/ u64 anbe_cfg_port[2];
-#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
-#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
-/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
-#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused09de0[0x09de0-0x09dd0];
-
-/*0x09de0*/ u64 anbe_fw_mstr_port[2];
-#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
-#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
-/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
- vxge_mBIT(3)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
- vxge_mBIT(7)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
- vxge_mBIT(11)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
- vxge_mBIT(15)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
- vxge_vBIT(val, 18, 6)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
- vxge_mBIT(27)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
- vxge_mBIT(35)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
- vxge_mBIT(39)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
- vxge_mBIT(43)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
- vxge_mBIT(47)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
-vxge_mBIT(51)
-#define \
-VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
- vxge_mBIT(55)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
- vxge_vBIT(val, 56, 4)
-#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
- vxge_vBIT(val, 60, 4)
-/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
- vxge_mBIT(32)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
- vxge_mBIT(33)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
- vxge_mBIT(40)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
- vxge_mBIT(41)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
- vxge_mBIT(42)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
- vxge_vBIT(val, 43, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
- vxge_mBIT(50)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
- vxge_vBIT(val, 54, 5)
-#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 59, 5)
-/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused09e30[0x09e30-0x09e20];
-
-/*0x09e30*/ u64 antp_gen_cfg_port[2];
-/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
- vxge_vBIT(val, 10, 6)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
- vxge_mBIT(23)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
- vxge_mBIT(27)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
- vxge_mBIT(35)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
- vxge_mBIT(43)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
- vxge_mBIT(51)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
-#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
- vxge_mBIT(59)
-/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
- vxge_vBIT(val, 4, 7)
-#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
- vxge_vBIT(val, 11, 5)
-/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
- vxge_vBIT(val, 5, 11)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
- vxge_vBIT(val, 32, 16)
-/*0x09e70*/ u64 mdio_mgr_access_port[2];
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
-#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
- u8 unused0a200[0x0a200-0x09e80];
-/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused0a400[0x0a400-0x0a288];
-
-/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
- u8 unused0ac90[0x0ac90-0x0a488];
-} __packed;
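
All of the field macros in the struct above are built from the vxge bit helpers, which number bits from the most significant bit of each 64-bit register. The following is a minimal standalone sketch of that convention; the vxge_mBIT()/vxge_vBIT()/vxge_bVALn() definitions are assumed (MSB-first, as used elsewhere in this header), and the two field macros are copied from the rdcrdtarb_cfg0 entry above.

#include <stdint.h>
#include <stdio.h>

/* Assumed helper definitions: bit 0 is the MSB of the 64-bit register image. */
#define vxge_mBIT(loc)           (0x8000000000000000ULL >> (loc))
#define vxge_vBIT(val, loc, sz)  (((uint64_t)(val)) << (64 - (loc) - (sz)))
#define vxge_bVALn(bits, loc, n) \
	((((uint64_t)(bits)) >> (64 - (loc) - (n))) & ((0x1ULL << (n)) - 1))

/* Field macros as defined for rdcrdtarb_cfg0 above. */
#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val)  vxge_vBIT(val, 48, 4)
#define VXGE_HW_RDCRDTARB_CFG0_EN_XON         vxge_mBIT(63)

int main(void)
{
	/* Compose: a 4-bit WAIT_CNT field at bit 48 plus the EN_XON flag at bit 63. */
	uint64_t cfg = VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(0x3) |
		       VXGE_HW_RDCRDTARB_CFG0_EN_XON;

	/* Decode the field back out of the register image. */
	printf("wait_cnt = %llu\n",
	       (unsigned long long)vxge_bVALn(cfg, 48, 4));
	return 0;
}
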
-
-/*VXGE_HW_SRPCIM_REGS_H*/
-struct vxge_hw_srpcim_reg {
-
-/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
-#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
- vxge_vBIT(val, 0, 32)
- u8 unused00100[0x00100-0x00008];
-
-/*0x00100*/ u64 srpcim_pcipif_int_status;
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
-#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
- BIT(11)
-/*0x00108*/ u64 srpcim_pcipif_int_mask;
-/*0x00110*/ u64 mrpcim_msg_reg;
-#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
-/*0x00118*/ u64 mrpcim_msg_mask;
-/*0x00120*/ u64 mrpcim_msg_alarm;
-/*0x00128*/ u64 vpath_msg_reg;
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
-#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
-/*0x00130*/ u64 vpath_msg_mask;
-/*0x00138*/ u64 vpath_msg_alarm;
- u8 unused00160[0x00160-0x00140];
-
-/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
-/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
- vxge_vBIT(val, 0, 5)
-/*0x00180*/ u64 vpath_to_srpcim_rmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
- vxge_vBIT(val, 0, 64)
- u8 unused00200[0x00200-0x00188];
-
-/*0x00200*/ u64 srpcim_general_int_status;
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
- u8 unused00210[0x00210-0x00208];
-
-/*0x00210*/ u64 srpcim_general_int_mask;
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
-#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
- u8 unused00220[0x00220-0x00218];
-
-/*0x00220*/ u64 srpcim_ppif_int_status;
-
-/*0x00228*/ u64 srpcim_ppif_int_mask;
-/*0x00230*/ u64 srpcim_gen_errors_reg;
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
-#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
-/*0x00238*/ u64 srpcim_gen_errors_mask;
-/*0x00240*/ u64 srpcim_gen_errors_alarm;
-/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
-/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
-/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
-/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;
-
-/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
-/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
- u8 unused00280[0x00280-0x00278];
-
-/*0x00280*/ u64 pf_sw_reset;
-#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
-/*0x00288*/ u64 srpcim_general_cfg1;
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
-#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
-/*0x00290*/ u64 srpcim_interrupt_cfg1;
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
- u8 unused002a8[0x002a8-0x00298];
-
-/*0x002a8*/ u64 srpcim_clear_msix_mask;
-#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
-/*0x002b0*/ u64 srpcim_set_msix_mask;
-#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
-/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
-#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
-/*0x002c0*/ u64 srpcim_rst_in_prog;
-#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
-/*0x002c8*/ u64 srpcim_reg_modified;
-#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
-/*0x002d0*/ u64 tgt_pf_illegal_access;
-#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
-/*0x002d8*/ u64 srpcim_msix_status;
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
-#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
- u8 unused00880[0x00880-0x002e0];
-
-/*0x00880*/ u64 xgmac_sr_int_status;
-#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
-/*0x00888*/ u64 xgmac_sr_int_mask;
-/*0x00890*/ u64 asic_ntwk_sr_err_reg;
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
- BIT(11)
-#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
-/*0x00898*/ u64 asic_ntwk_sr_err_mask;
-/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
- u8 unused008c0[0x008c0-0x008a8];
-
-/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
- vxge_vBIT(val, 0, 17)
- u8 unused00900[0x00900-0x008c8];
-
-/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
-#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00908*/ u64 umq_vh_data_list_empty;
-#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
- BIT(0)
-/*0x00910*/ u64 wde_cfg;
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
-#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
-#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
-#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
-#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
-#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
-#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
-#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
-#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
-#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
-#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
-#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)
-
-} __packed;
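
These register structs rely on the unusedXXXXX byte-array members to pad the layout so each field lands at the byte offset given in its /*0xNNNNN*/ comment. Below is a minimal standalone sketch of that padding idiom using a hypothetical two-register struct (not one of the real layouts), with a compile-time offset check; __attribute__((packed)) stands in for the kernel's __packed macro.

#include <stddef.h>
#include <stdint.h>

/* Toy layout mirroring the padding idiom: reg_b must land at offset 0x100. */
struct toy_reg {
/*0x00000*/ uint64_t reg_a;
	    uint8_t  unused00100[0x00100 - 0x00008];
/*0x00100*/ uint64_t reg_b;
} __attribute__((packed));

/* The pad array spans the gap between the end of reg_a (0x8) and 0x100. */
_Static_assert(offsetof(struct toy_reg, reg_b) == 0x00100,
	       "reg_b sits at its documented offset");

int main(void)
{
	return 0;
}
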
-
-/*VXGE_HW_VPMGMT_REGS_H*/
-struct vxge_hw_vpmgmt_reg {
-
- u8 unused00040[0x00040-0x00000];
-
-/*0x00040*/ u64 vpath_to_func_map_cfg1;
-#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
- vxge_vBIT(val, 3, 5)
-/*0x00048*/ u64 vpath_is_first;
-#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
-/*0x00050*/ u64 srpcim_to_vpath_wmsg;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
-#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused00100[0x00100-0x00060];
-
-/*0x00100*/ u64 tim_vpath_assignment;
-#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
- u8 unused00140[0x00140-0x00108];
-
-/*0x00140*/ u64 rqa_top_prty_for_vp;
-#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
- vxge_vBIT(val, 59, 5)
- u8 unused001c0[0x001c0-0x00148];
-
-/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
- vxge_mBIT(19)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
- vxge_mBIT(23)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(47)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
- vxge_mBIT(51)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
- vxge_mBIT(55)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
- vxge_mBIT(59)
-#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
-/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
- vxge_vBIT(val, 24, 8)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
-#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
-/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
- vxge_vBIT(val, 5, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
- vxge_vBIT(val, 13, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
- vxge_vBIT(val, 17, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
- vxge_vBIT(val, 25, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
- vxge_vBIT(val, 33, 3)
-#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
- vxge_vBIT(val, 37, 3)
-/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
- vxge_mBIT(27)
-#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
- vxge_vBIT(val, 50, 14)
-/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
- vxge_vBIT(val, 9, 3)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
- vxge_vBIT(val, 20, 16)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
- vxge_mBIT(39)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
- vxge_mBIT(43)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
- vxge_vBIT(val, 48, 8)
-#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
- vxge_mBIT(59)
- u8 unused00240[0x00240-0x00208];
-
-/*0x00240*/ u64 xmac_vsport_choices_vp;
-#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
- u8 unused00260[0x00260-0x00248];
-
-/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
-#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
- vxge_mBIT(11)
-/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
- vxge_mBIT(3)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
- vxge_mBIT(11)
-#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
-/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
- vxge_vBIT(val, 2, 2)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
- vxge_vBIT(val, 28, 4)
-#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
- vxge_vBIT(val, 32, 4)
-/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
- vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
-#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
- vxge_vBIT(val, 32, 16)
-/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
- vxge_vBIT(val, 4, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
- vxge_vBIT(val, 8, 4)
-#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
-/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
- vxge_mBIT(7)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
-#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
- u8 unused002c0[0x002c0-0x002a8];
-
-/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
-#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
-/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
-#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
- u8 unused00300[0x00300-0x002e0];
-
-/*0x00300*/ u64 wol_mp_crc;
-#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
-/*0x00308*/ u64 wol_mp_mask_a;
-#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
-/*0x00310*/ u64 wol_mp_mask_b;
-#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
- u8 unused00360[0x00360-0x00318];
-
-/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
-#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
-/*0x00368*/ u64 rx_datapath_util_vp_clone;
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
- u8 unused00380[0x00380-0x00370];
-
-/*0x00380*/ u64 tx_datapath_util_vp_clone;
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
- vxge_vBIT(val, 7, 9)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
- vxge_vBIT(val, 16, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
- vxge_vBIT(val, 20, 4)
-#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
- vxge_vBIT(val, 24, 4)
-
-} __packed;
-
-struct vxge_hw_vpath_reg {
-
- u8 unused00300[0x00300];
-
-/*0x00300*/ u64 usdc_vpath;
-#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
- u8 unused00a00[0x00a00-0x00308];
-
-/*0x00a00*/ u64 wrdma_alarm_status;
-#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
-/*0x00a08*/ u64 wrdma_alarm_mask;
- u8 unused00a30[0x00a30-0x00a10];
-
-/*0x00a30*/ u64 prc_alarm_reg;
-#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
-#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
-#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
-/*0x00a38*/ u64 prc_alarm_mask;
-/*0x00a40*/ u64 prc_alarm_alarm;
-/*0x00a48*/ u64 prc_cfg1;
-#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
-#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
-#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
-#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
- u8 unused00a60[0x00a60-0x00a50];
-
-/*0x00a60*/ u64 prc_cfg4;
-#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
-#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
-#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
-#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
-#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
-#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
-#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
-#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
-/*0x00a68*/ u64 prc_cfg5;
-#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
-/*0x00a70*/ u64 prc_cfg6;
-#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
-#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
-#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
-#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
-#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
-#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
-#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
-#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
-/*0x00a78*/ u64 prc_cfg7;
-#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
-#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
-#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
-#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
-#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
-/*0x00a80*/ u64 tim_dest_addr;
-#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
-/*0x00a88*/ u64 prc_rxd_doorbell;
-#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
-/*0x00a90*/ u64 rqa_prty_for_vp;
-#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
-/*0x00a98*/ u64 rxdmem_size;
-#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
-/*0x00aa0*/ u64 frm_in_progress_cnt;
-#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
- vxge_vBIT(val, 59, 5)
-/*0x00aa8*/ u64 rx_multi_cast_stats;
-#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
-/*0x00ab0*/ u64 rx_frm_transferred;
-#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
- vxge_vBIT(val, 32, 32)
-/*0x00ab8*/ u64 rxd_returned;
-#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
- u8 unused00c00[0x00c00-0x00ac0];
-
-/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
-#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
-/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
-#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
-/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
-/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
-#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
-#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
-/*0x00c48*/ u64 kdfc_drbl_triplet_total;
-#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
- vxge_vBIT(val, 17, 15)
- u8 unused00c60[0x00c60-0x00c50];
-
-/*0x00c60*/ u64 usdc_drbl_ctrl;
-#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
-#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
-/*0x00c68*/ u64 usdc_vp_ready;
-#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
-#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
-#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
-/*0x00c70*/ u64 kdfc_status;
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
-#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
- u8 unused00c80[0x00c80-0x00c78];
-
-/*0x00c80*/ u64 xmac_rpa_vcfg;
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
-#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
-#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
-#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
-/*0x00c88*/ u64 rxmac_vcfg0;
-#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
-#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
-#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
-#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
-#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
-#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
-/*0x00c90*/ u64 rxmac_vcfg1;
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
-#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
-#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
-/*0x00c98*/ u64 rts_access_steer_ctrl;
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
-#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
-/*0x00ca0*/ u64 rts_access_steer_data0;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00ca8*/ u64 rts_access_steer_data1;
-#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
- u8 unused00d00[0x00d00-0x00cb0];
-
-/*0x00d00*/ u64 xmac_vsport_choice;
-#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
-/*0x00d08*/ u64 xmac_stats_cfg;
-/*0x00d10*/ u64 xmac_stats_access_cmd;
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
-#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
-/*0x00d18*/ u64 xmac_stats_access_data;
-#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
-/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
-#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
- u8 unused00d30[0x00d30-0x00d28];
-
-/*0x00d30*/ u64 xgmac_vp_int_status;
-#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
- vxge_mBIT(3)
-/*0x00d38*/ u64 xgmac_vp_int_mask;
-/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
- vxge_mBIT(11)
-#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
- vxge_mBIT(15)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
- vxge_mBIT(19)
-#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
-/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
-/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
- u8 unused00d80[0x00d80-0x00d58];
-
-/*0x00d80*/ u64 rtdma_bw_ctrl;
-#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
-#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
-/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
-#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
- vxge_vBIT(val, 21, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
- vxge_vBIT(val, 29, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
- vxge_vBIT(val, 37, 3)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
- vxge_vBIT(val, 51, 5)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
-#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
- vxge_vBIT(val, 61, 3)
-/*0x00d90*/ u64 pda_pcc_job_monitor;
-#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
-/*0x00d98*/ u64 tx_protocol_assist_cfg;
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
-#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
- u8 unused01000[0x01000-0x00da0];
-
-/*0x01000*/ u64 tim_cfg1_int_num[4];
-#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
-#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
-#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
-/*0x01020*/ u64 tim_cfg2_int_num[4];
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
-#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
-/*0x01040*/ u64 tim_cfg3_int_num[4];
-#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
-#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
-#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
-#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
-/*0x01060*/ u64 tim_wrkld_clc;
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
-#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
-#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
-/*0x01068*/ u64 tim_bitmap;
-#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
-#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
-/*0x01070*/ u64 tim_ring_assn;
-#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
-/*0x01078*/ u64 tim_remap;
-#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
-#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
-#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
-#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
-/*0x01080*/ u64 tim_vpath_map;
-#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
-/*0x01088*/ u64 tim_pci_cfg;
-#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
-#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
-#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
-#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
- u8 unused01100[0x01100-0x01090];
-
-/*0x01100*/ u64 sgrp_assign;
-#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
-/*0x01108*/ u64 sgrp_aoa_and_result;
-#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01110*/ u64 rpe_pci_cfg;
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
-#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
-#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
-#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
-#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
-#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
-#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
-/*0x01118*/ u64 rpe_lro_cfg;
-#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
-#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
-#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
-/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
-#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
-/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
- vxge_vBIT(val, 32, 32)
-/*0x01130*/ u64 txpe_pci_nce_cfg;
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
-#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
- u8 unused01180[0x01180-0x01138];
-
-/*0x01180*/ u64 msg_qpad_en_cfg;
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
-#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
-#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
-#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
-/*0x01188*/ u64 msg_pci_cfg;
-#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
-#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
-#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
-#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
-/*0x01190*/ u64 umqdmq_ir_init;
-#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
-/*0x01198*/ u64 dmq_ir_int;
-#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011a0*/ u64 dmq_bwr_init_add;
-#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011a8*/ u64 dmq_bwr_init_byte;
-#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011b0*/ u64 dmq_ir;
-#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
-/*0x011b8*/ u64 umq_int;
-#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
-#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
-#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
-/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
-#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
-/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
-#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
-/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
-#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
-/*0x011d8*/ u64 umq_bwr_init_add;
-#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
-/*0x011e0*/ u64 umq_bwr_init_byte;
-#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
-/*0x011e8*/ u64 gendma_int;
-/*0x011f0*/ u64 umqdmq_ir_init_notify;
-#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x011f8*/ u64 dmq_init_notify;
-#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
-/*0x01200*/ u64 umq_init_notify;
-#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
- u8 unused01380[0x01380-0x01208];
-
-/*0x01380*/ u64 tpa_cfg;
-#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
-#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
-#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
-#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
- u8 unused01400[0x01400-0x01388];
-
-/*0x01400*/ u64 tx_vp_reset_discarded_frms;
-#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 48, 16)
- u8 unused01480[0x01480-0x01408];
-
-/*0x01480*/ u64 fau_rpa_vcfg;
-#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
-#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
-#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
- u8 unused014d0[0x014d0-0x01488];
-
-/*0x014d0*/ u64 dbg_stats_rx_mpa;
-#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
-/*0x014d8*/ u64 dbg_stats_rx_fau;
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
- vxge_vBIT(val, 16, 16)
-#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused014f0[0x014f0-0x014e0];
-
-/*0x014f0*/ u64 fbmc_vp_rdy;
-#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
- u8 unused01e00[0x01e00-0x014f8];
-
-/*0x01e00*/ u64 vpath_pcipif_int_status;
-#define \
-VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
- vxge_mBIT(7)
-/*0x01e08*/ u64 vpath_pcipif_int_mask;
- u8 unused01e20[0x01e20-0x01e10];
-
-/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
-#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
- vxge_mBIT(3)
-/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
-/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
- u8 unused01ea0[0x01ea0-0x01e38];
-
-/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
- vxge_vBIT(val, 0, 64)
-/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
-#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
- vxge_mBIT(0)
- u8 unused02000[0x02000-0x01eb0];
-
-/*0x02000*/ u64 vpath_general_int_status;
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
-/*0x02008*/ u64 vpath_general_int_mask;
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
-/*0x02010*/ u64 vpath_ppif_int_status;
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
- vxge_mBIT(3)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
- vxge_mBIT(7)
-#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
- vxge_mBIT(11)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(15)
-#define \
-VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
- vxge_mBIT(19)
-/*0x02018*/ u64 vpath_ppif_int_mask;
-/*0x02020*/ u64 kdfcctl_errors_reg;
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
-/*0x02028*/ u64 kdfcctl_errors_mask;
-/*0x02030*/ u64 kdfcctl_errors_alarm;
- u8 unused02040[0x02040-0x02038];
-
-/*0x02040*/ u64 general_errors_reg;
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
-#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
-#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
-#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
-#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
-/*0x02048*/ u64 general_errors_mask;
-/*0x02050*/ u64 general_errors_alarm;
-/*0x02058*/ u64 pci_config_errors_reg;
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
-#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
-/*0x02060*/ u64 pci_config_errors_mask;
-/*0x02068*/ u64 pci_config_errors_alarm;
-/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
-#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
- vxge_mBIT(3)
-/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
-/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
-/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
-#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
- vxge_vBIT(val, 0, 17)
-/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
-/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
- u8 unused02108[0x02108-0x020a0];
-
-/*0x02108*/ u64 kdfcctl_status;
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
-#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
-/*0x02110*/ u64 rsthdlr_status;
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
-#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
-/*0x02118*/ u64 fifo0_status;
-#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02120*/ u64 fifo1_status;
-#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
-/*0x02128*/ u64 fifo2_status;
-#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
- u8 unused02158[0x02158-0x02130];
-
-/*0x02158*/ u64 tgt_illegal_access;
-#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
- u8 unused02200[0x02200-0x02160];
-
-/*0x02200*/ u64 vpath_general_cfg1;
-#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
-#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
-#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
-#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
-/*0x02208*/ u64 vpath_general_cfg2;
-#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
-/*0x02210*/ u64 vpath_general_cfg3;
-#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
- u8 unused02220[0x02220-0x02218];
-
-/*0x02220*/ u64 kdfcctl_cfg0;
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
-#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
-#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
-
- u8 unused02268[0x02268-0x02228];
-
-/*0x02268*/ u64 stats_cfg;
-#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
-/*0x02270*/ u64 interrupt_cfg0;
-#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
-#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
- u8 unused02280[0x02280-0x02278];
-
-/*0x02280*/ u64 interrupt_cfg2;
-#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
-/*0x02288*/ u64 one_shot_vect0_en;
-#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
-/*0x02290*/ u64 one_shot_vect1_en;
-#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
-/*0x02298*/ u64 one_shot_vect2_en;
-#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
-/*0x022a0*/ u64 one_shot_vect3_en;
-#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
- u8 unused022b0[0x022b0-0x022a8];
-
-/*0x022b0*/ u64 pci_config_access_cfg1;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
-/*0x022b8*/ u64 pci_config_access_cfg2;
-#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
-/*0x022c0*/ u64 pci_config_access_status;
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
-#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
- u8 unused02300[0x02300-0x022c8];
-
-/*0x02300*/ u64 vpath_debug_stats0;
-#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02308*/ u64 vpath_debug_stats1;
-#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
-/*0x02310*/ u64 vpath_debug_stats2;
-#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
-/*0x02318*/ u64 vpath_debug_stats3;
-#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02320*/ u64 vpath_debug_stats4;
-#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
- vxge_vBIT(val, 0, 64)
-/*0x02328*/ u64 vpath_debug_stats5;
-#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02330*/ u64 vpath_debug_stats6;
-#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
-/*0x02338*/ u64 vpath_genstats_count01;
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02340*/ u64 vpath_genstats_count23;
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
- vxge_vBIT(val, 0, 32)
-#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02348*/ u64 vpath_genstats_count4;
-#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
- vxge_vBIT(val, 32, 32)
-/*0x02350*/ u64 vpath_genstats_count5;
-#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
- vxge_vBIT(val, 32, 32)
- u8 unused02648[0x02648-0x02358];
-} __packed;
-
-#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
-
-/* Capability lists */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
-#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link Width */
-#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
deleted file mode 100644
index ee164970b267..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ /dev/null
@@ -1,2428 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/prefetch.h>
-
-#include "vxge-traffic.h"
-#include "vxge-config.h"
-#include "vxge-main.h"
-
-/*
- * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Enable vpath interrupts. The function is to be executed last in the
- * vpath initialization sequence.
- *
- * See also: vxge_hw_vpath_intr_disable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_status);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_status);
-
- readq(&vp_reg->vpath_general_int_status);
-
- /* Mask unwanted interrupts */
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- /* Unmask the individual interrupts */
-
- writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
- &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
- __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
-
- if (vpath->hldev->first_vp_id != vpath->vp_id)
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
- else
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
- VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_pio_mem_write32_upper(0,
- &vp_reg->vpath_general_int_mask);
-exit:
- return status;
-
-}
-
-/*
- * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
- * @vp: Virtual Path handle.
- *
- * Disable vpath interrupts.
- *
- * See also: vxge_hw_vpath_intr_enable()
- */
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_general_int_mask);
-
- writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->pci_config_errors_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->mrpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_to_vpath_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_ppif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->srpcim_msg_to_vpath_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->vpath_pcipif_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->wrdma_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->xgmac_vp_int_mask);
-
- __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_mask);
-
-exit:
- return status;
-}
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
-{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
- return;
-
- vp_reg = fifo->vp_reg;
- config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
-
- if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- fifo->tim_tti_cfg1_saved = val64;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg1_saved;
-
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- ring->tim_rti_cfg1_saved = val64;
- writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-}
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
-{
- u64 val64 = fifo->tim_tti_cfg3_saved;
- u64 timer = (fifo->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
-
- writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
- /* tti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
-{
- u64 val64 = ring->tim_rti_cfg3_saved;
- u64 timer = (ring->rtimer * 1000) / 272;
-
- val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
- if (timer)
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
- VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
-
- writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
- /* rti_cfg3_saved is not updated again because it is
- * initialized at one place only - init time.
- */
-}
-
-/**
- * vxge_hw_channel_msix_mask - Mask MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function masks the MSIX interrupt for the given msix_id.
- */
-void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->set_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function unmasks the MSIX interrupt for the given msix_id.
- */
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
-
-/**
- * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
- * @channel: Channel for rx or tx handle
- * @msix_id: MSIX ID
- *
- * The function unmasks the MSIX interrupt for the given msix_id
- * if it is configured in MSIX one-shot mode.
- */
-void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
-{
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
-}
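
For illustration only (not part of the original driver): a minimal sketch of how a per-channel MSI-X handler might pair the mask/unmask/clear helpers above. The handler name, the msix_id and oneshot variables, and the descriptor-processing step are assumptions; only the three vxge_hw_channel_msix_* calls come from this file.

#include <linux/interrupt.h>
#include "vxge-traffic.h"

/* Hypothetical per-vector ISR: quiesce the vector, do the work, re-arm it. */
static irqreturn_t example_channel_isr(int irq, void *dev_id)
{
	struct __vxge_hw_channel *channel = dev_id;
	int msix_id = 0;	/* vector index, assumed known to the caller */
	bool oneshot = false;	/* true if the device runs in MSI-X one-shot mode */

	vxge_hw_channel_msix_mask(channel, msix_id);	/* stop further interrupts */

	/* ... process completed descriptors on this channel here ... */

	if (oneshot)
		vxge_hw_channel_msix_clear(channel, msix_id);	/* re-arm one-shot vector */
	else
		vxge_hw_channel_msix_unmask(channel, msix_id);

	return IRQ_HANDLED;
}
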
-
-/**
- * vxge_hw_device_set_intr_type - Updates the configuration
- * with the new interrupt type.
- * @hldev: HW device handle.
- * @intr_mode: New interrupt type
- */
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
-{
-
- if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (intr_mode != VXGE_HW_INTR_MODE_DEF))
- intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
-
- hldev->config.intr_mode = intr_mode;
- return intr_mode;
-}
-
-/**
- * vxge_hw_device_intr_enable - Enable interrupts.
- * @hldev: HW device handle.
- *
- * Enable Titan interrupts. The function is to be executed last in the
- * Titan initialization sequence.
- *
- * See also: vxge_hw_device_intr_disable()
- */
-void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
-{
- u32 i;
- u64 val64;
- u32 val32;
-
- vxge_hw_device_mask_all(hldev);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_enable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
- val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
-
- if (val64 != 0) {
- writeq(val64, &hldev->common_reg->tim_int_status0);
-
- writeq(~val64, &hldev->common_reg->tim_int_mask0);
- }
-
- val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
-
- if (val32 != 0) {
- __vxge_hw_pio_mem_write32_upper(val32,
- &hldev->common_reg->tim_int_status1);
-
- __vxge_hw_pio_mem_write32_upper(~val32,
- &hldev->common_reg->tim_int_mask1);
- }
- }
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- vxge_hw_device_unmask_all(hldev);
-}
-
-/**
- * vxge_hw_device_intr_disable - Disable Titan interrupts.
- * @hldev: HW device handle.
- *
- * Disable Titan interrupts.
- *
- * See also: vxge_hw_device_intr_enable()
- */
-void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
-{
- u32 i;
-
- vxge_hw_device_mask_all(hldev);
-
- /* mask all the tim interrupts */
- writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
- __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
- &hldev->common_reg->tim_int_mask1);
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- vxge_hw_vpath_intr_disable(
- VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
- }
-}
-
-/**
- * vxge_hw_device_mask_all - Mask all device interrupts.
- * @hldev: HW device handle.
- *
- * Mask all device interrupts.
- *
- * See also: vxge_hw_device_unmask_all()
- */
-void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
- VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_unmask_all - Unmask all device interrupts.
- * @hldev: HW device handle.
- *
- * Unmask all device interrupts.
- *
- * See also: vxge_hw_device_mask_all()
- */
-void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
-{
- u64 val64 = 0;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
- val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
- &hldev->common_reg->titan_mask_all_int);
-}
-
-/**
- * vxge_hw_device_flush_io - Flush io writes.
- * @hldev: HW device handle.
- *
- * The function performs a read operation to flush io writes.
- *
- * Returns: void
- */
-void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
-{
- readl(&hldev->common_reg->titan_general_int_status);
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
- enum vxge_hw_event type)
-{
- switch (type) {
- case VXGE_HW_EVENT_UNKNOWN:
- break;
- case VXGE_HW_EVENT_RESET_START:
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- goto out;
- case VXGE_HW_EVENT_ALARM_CLEARED:
- goto out;
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- case VXGE_HW_EVENT_CRITICAL_ERR:
- case VXGE_HW_EVENT_SERR:
- break;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- break;
- default:
- vxge_assert(0);
- goto out;
- }
-
- /* notify driver */
- if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(hldev,
- type, vp_id);
-out:
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
- /*
-	 * If the link state is already down, there is nothing to do.
- */
- if (hldev->link_state == VXGE_HW_LINK_DOWN)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_DOWN;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_down)
- hldev->uld_callbacks->link_down(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for a programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
- /*
-	 * If the link state is already up, there is nothing to do.
- */
- if (hldev->link_state == VXGE_HW_LINK_UP)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_UP;
-
- /* notify driver */
- if (hldev->uld_callbacks->link_up)
- hldev->uld_callbacks->link_up(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
-{
- u64 val64;
- u64 alarm_status;
- u64 pic_status;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
- u64 mask64;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath == NULL) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out2;
- }
-
- hldev = vpath->hldev;
- vp_reg = vpath->vp_reg;
- alarm_status = readq(&vp_reg->vpath_general_int_status);
-
- if (alarm_status == VXGE_HW_ALL_FOXES) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
- alarm_event);
- goto out;
- }
-
- sw_stats = vpath->sw_stats;
-
- if (alarm_status & ~(
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
- sw_stats->error_stats.unknown_alarms++;
-
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out;
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
- val64 = readq(&vp_reg->xgmac_vp_int_status);
-
- if (val64 &
- VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
- val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
- ))) {
- sw_stats->error_stats.network_sustained_fault++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_down_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_DOWN, alarm_event);
- }
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
- ))) {
-
- sw_stats->error_stats.network_sustained_ok++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_up_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_UP, alarm_event);
- }
-
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
- if (skip_alarms)
- return VXGE_HW_OK;
- }
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
- pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
- val64 = readq(&vp_reg->general_errors_reg);
- mask64 = readq(&vp_reg->general_errors_mask);
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
- ~mask64) {
- sw_stats->error_stats.ini_serr_det++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_SERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
- ~mask64) {
- sw_stats->error_stats.dblgen_fifo0_overflow++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
- ~mask64)
- sw_stats->error_stats.statsb_pif_chain_error++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
- ~mask64)
- sw_stats->error_stats.statsb_drop_timeout++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
- ~mask64)
- sw_stats->error_stats.target_illegal_access++;
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
- val64 = readq(&vp_reg->kdfcctl_errors_reg);
- mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->kdfcctl_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
- val64 = readq(&vp_reg->wrdma_alarm_status);
-
- if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
- val64 = readq(&vp_reg->prc_alarm_reg);
- mask64 = readq(&vp_reg->prc_alarm_mask);
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
- ~mask64)
- sw_stats->error_stats.prc_ring_bumps++;
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
- ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
- & ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
- & ~mask64) {
- sw_stats->error_stats.prc_quanta_size_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
- }
-out:
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
- if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
- (alarm_event == VXGE_HW_EVENT_UNKNOWN))
- return VXGE_HW_OK;
-
- __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
- if (alarm_event == VXGE_HW_EVENT_SERR)
- return VXGE_HW_ERR_CRITICAL;
-
- return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
- VXGE_HW_ERR_SLOT_FREEZE :
- (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
- VXGE_HW_ERR_VPATH;
-}
-
-/**
- * vxge_hw_device_begin_irq - Begin IRQ processing.
- * @hldev: HW device handle.
- * @skip_alarms: Do not clear the alarms
- * @reason: "Reason" for the interrupt, the value of Titan's
- * general_int_status register.
- *
- * The function performs two actions: it first checks whether the interrupt
- * was raised by the device (relevant when the IRQ line is shared), and then
- * it masks the device interrupts.
- *
- * Note:
- * vxge_hw_device_begin_irq() does not flush MMIO writes through the
- * bridge. Therefore, two back-to-back interrupts are potentially possible.
- *
- * Returns: 0, if the interrupt is not "ours" (note that in this case the
- * device remains enabled).
- * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
- * status.
- */
-enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
- u32 skip_alarms, u64 *reason)
-{
- u32 i;
- u64 val64;
- u64 adapter_status;
- u64 vpath_mask;
- enum vxge_hw_status ret = VXGE_HW_OK;
-
- val64 = readq(&hldev->common_reg->titan_general_int_status);
-
- if (unlikely(!val64)) {
- /* not Titan interrupt */
- *reason = 0;
- ret = VXGE_HW_ERR_WRONG_IRQ;
- goto exit;
- }
-
- if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
-
- adapter_status = readq(&hldev->common_reg->adapter_status);
-
- if (adapter_status == VXGE_HW_ALL_FOXES) {
-
- __vxge_hw_device_handle_error(hldev,
- NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
- *reason = 0;
- ret = VXGE_HW_ERR_SLOT_FREEZE;
- goto exit;
- }
- }
-
- hldev->stats.sw_dev_info_stats.total_intr_cnt++;
-
- *reason = val64;
-
- vpath_mask = hldev->vpaths_deployed >>
- (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
-
- if (val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
- hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
-
- return VXGE_HW_OK;
- }
-
- hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
-
- if (unlikely(val64 &
- VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
-
- enum vxge_hw_status error_level = VXGE_HW_OK;
-
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
- if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
- continue;
-
- ret = __vxge_hw_vpath_alarm_process(
- &hldev->virtual_paths[i], skip_alarms);
-
- error_level = VXGE_HW_SET_LEVEL(ret, error_level);
-
- if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
- (ret == VXGE_HW_ERR_SLOT_FREEZE)))
- break;
- }
-
- ret = error_level;
- }
-exit:
- return ret;
-}
-
-/**
- * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
- * condition that has caused the Tx and Rx interrupt.
- * @hldev: HW device.
- *
- * Acknowledge (that is, clear) the condition that has caused
- * the Tx and Rx interrupt.
- * See also: vxge_hw_device_begin_irq(),
- * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
- */
-void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
-{
-
- if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status0);
- }
-
- if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
- &hldev->common_reg->tim_int_status1);
- }
-}
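
As a usage illustration (not the original driver's ISR): a sketch of how a line-interrupt handler could combine vxge_hw_device_begin_irq() and vxge_hw_device_clear_tx_rx(). The handler name and the deferred-work step are assumptions; the two vxge_hw_device_* calls and VXGE_HW_ERR_WRONG_IRQ come from this file.

#include <linux/interrupt.h>
#include "vxge-traffic.h"
#include "vxge-config.h"

/* Hypothetical INTA handler: claim the interrupt, ack Tx/Rx, defer the work. */
static irqreturn_t example_inta_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0 /* do clear alarms */, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared line, the interrupt is not ours */

	vxge_hw_device_clear_tx_rx(hldev);	/* ack the Tx/Rx condition */
	/* ... schedule NAPI / ring processing here (driver specific) ... */

	return IRQ_HANDLED;
}
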
-
-/*
- * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
- * @channel: Channel
- * @dtrh: Buffer to return the DTR pointer
- *
- * Allocates a dtr from the reserve array. If the reserve array is empty,
- * it swaps the reserve and free arrays.
- *
- */
-static enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
-{
- if (channel->reserve_ptr - channel->reserve_top > 0) {
-_alloc_after_swap:
- *dtrh = channel->reserve_arr[--channel->reserve_ptr];
-
- return VXGE_HW_OK;
- }
-
- /* switch between empty and full arrays */
-
-	/* The idea behind this design is that keeping the free and reserve
-	 * arrays separate also separates the irq and non-irq paths, i.e. no
-	 * additional locking is needed when a resource is freed. */
-
- if (channel->length - channel->free_ptr > 0) {
- swap(channel->reserve_arr, channel->free_arr);
- channel->reserve_ptr = channel->length;
- channel->reserve_top = channel->free_ptr;
- channel->free_ptr = channel->length;
-
- channel->stats->reserve_free_swaps_cnt++;
-
- goto _alloc_after_swap;
- }
-
- channel->stats->full_cnt++;
-
- *dtrh = NULL;
- return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
-}
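
The lock-avoidance idea behind the reserve/free split above can be shown in isolation. A simplified standalone sketch, assuming nothing from the driver (the pool type and function names are invented for illustration); initialise with reserve_ptr = length, reserve_top = 0, free_ptr = length and a fully populated reserve_arr:

#include <stddef.h>

struct two_stack_pool {
	void **reserve_arr;	/* consumer pops reserve_arr[--reserve_ptr] down to reserve_top */
	void **free_arr;	/* producer pushes free_arr[--free_ptr], starting at length */
	size_t length;		/* capacity of each array */
	size_t reserve_ptr;
	size_t reserve_top;
	size_t free_ptr;
};

/* Pop one item; when the reserve side runs dry, swap in the freed items. */
static void *pool_alloc(struct two_stack_pool *p)
{
	if (p->reserve_ptr > p->reserve_top)
		return p->reserve_arr[--p->reserve_ptr];

	if (p->free_ptr == p->length)
		return NULL;			/* nothing was freed either: pool exhausted */

	{	/* swap roles: the freed items become the new reserve stack */
		void **tmp = p->reserve_arr;

		p->reserve_arr = p->free_arr;
		p->free_arr = tmp;
	}
	p->reserve_ptr = p->length;	/* freed items occupy the top end of the array */
	p->reserve_top = p->free_ptr;	/* ...down to where the old free pointer stopped */
	p->free_ptr = p->length;	/* the free side starts empty again */

	return p->reserve_arr[--p->reserve_ptr];
}

/* Return an item; touches only the free side, so it does not race the
 * consumer as long as allocation and freeing run in different contexts. */
static void pool_free(struct two_stack_pool *p, void *item)
{
	p->free_arr[--p->free_ptr] = item;
}

This mirrors the reserve_ptr/reserve_top/free_ptr bookkeeping used by vxge_hw_channel_dtr_alloc() above and vxge_hw_channel_dtr_free() below.
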
-
-/*
- * vxge_hw_channel_dtr_post - Post a dtr to the channel
- * @channelh: Channel
- * @dtrh: DTR pointer
- *
- * Posts a dtr to work array.
- *
- */
-static void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
-{
- vxge_assert(channel->work_arr[channel->post_index] == NULL);
-
- channel->work_arr[channel->post_index++] = dtrh;
-
- /* wrap-around */
- if (channel->post_index == channel->length)
- channel->post_index = 0;
-}
-
-/*
- * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
- * @channel: Channel
- * @dtr: Buffer to return the next completed DTR pointer
- *
- * Returns the next completed dtr without removing it from the work array.
- *
- */
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
-{
- vxge_assert(channel->compl_index < channel->length);
-
- *dtrh = channel->work_arr[channel->compl_index];
- prefetch(*dtrh);
-}
-
-/*
- * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
- * @channel: Channel handle
- *
- * Removes the next completed dtr from the work array.
- *
- */
-void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
-{
- channel->work_arr[channel->compl_index] = NULL;
-
- /* wrap-around */
- if (++channel->compl_index == channel->length)
- channel->compl_index = 0;
-
- channel->stats->total_compl_cnt++;
-}
-
-/*
- * vxge_hw_channel_dtr_free - Frees a dtr
- * @channel: Channel handle
- * @dtr: DTR pointer
- *
- * Returns the dtr to the free array.
- *
- */
-void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
-{
- channel->free_arr[--channel->free_ptr] = dtrh;
-}
-
-/*
- * vxge_hw_channel_dtr_count
- * @channel: Channel handle. Obtained via vxge_hw_channel_open().
- *
- * Retrieve the number of DTRs available. This function cannot be called
- * from the data path; ring_initial_replenishi() is the only user.
- */
-int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
-{
- return (channel->reserve_ptr - channel->reserve_top) +
- (channel->length - channel->free_ptr);
-}
-
-/**
- * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- *
- * Reserve an Rx descriptor for subsequent filling-in by the driver
- * and posting on the corresponding channel via
- * vxge_hw_ring_rxd_post().
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
- *
- */
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
- void **rxdh)
-{
- enum vxge_hw_status status;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, rxdh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_ring_rxd_1 *rxdp =
- (struct vxge_hw_ring_rxd_1 *)*rxdh;
-
- rxdp->control_0 = rxdp->control_1 = 0;
- }
-
- return status;
-}
-
-/**
- * vxge_hw_ring_rxd_free - Free descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
- * be:
- *
- * - reserved (vxge_hw_ring_rxd_reserve);
- *
- * - posted (vxge_hw_ring_rxd_post);
- *
- * - completed (vxge_hw_ring_rxd_next_completed);
- *
- * - and recycled again (vxge_hw_ring_rxd_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_free(channel, rxdh);
-
-}
-
-/**
- * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * This routine prepares a rxd and posts
- */
-void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_post_post - Process rxd after post.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post
- */
-void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
-
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
-
-/**
- * vxge_hw_ring_rxd_post - Post descriptor on the ring.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
- *
- * Post descriptor on the ring.
- * Prior to posting the descriptor should be filled in accordance with
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
-{
- struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
- wmb();
- rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(channel, rxdh);
-
- if (ring->stats->common_stats.usage_cnt > 0)
- ring->stats->common_stats.usage_cnt--;
-}
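
A minimal sketch of the Rx replenish path built from the ring helpers documented above; the function name is hypothetical and the buffer setup is deliberately elided:

/* Illustrative only: replenishing a single Rx descriptor. */
static enum vxge_hw_status example_ring_replenish_one(struct __vxge_hw_ring *ring)
{
	void *rxdh = NULL;
	enum vxge_hw_status status;

	status = vxge_hw_ring_rxd_reserve(ring, &rxdh);
	if (status != VXGE_HW_OK)
		return status;		/* VXGE_HW_INF_OUT_OF_DESCRIPTORS */

	/* ... fill the descriptor with a DMA-mapped receive buffer here ... */

	vxge_hw_ring_rxd_post(ring, rxdh);	/* hands ownership to the adapter */
	return VXGE_HW_OK;
}
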
-
-/**
- * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- *
- * Processes rxd after post with memory barrier.
- */
-void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
-{
- wmb();
- vxge_hw_ring_rxd_post_post(ring, rxdh);
-}
-
-/**
- * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Receive Descriptor Format. Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
- * completions (the very first completion is passed by HW via
- * vxge_hw_ring_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_ring_rxd_next_completed either immediately from inside the
- * ring callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * Non-zero @t_code means failure to fill-in receive buffer(s)
- * of the descriptor.
- * For instance, parity error detected during the data transfer.
- * In this case Titan will complete the descriptor and indicate
- * for the host that the received data is not to be used.
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- * See also: vxge_hw_ring_callback_f{},
- * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
- */
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_ring_rxd_1 *rxdp;
- enum vxge_hw_status status = VXGE_HW_OK;
- u64 control_0, own;
-
- channel = &ring->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, rxdh);
-
- rxdp = *rxdh;
- if (rxdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- control_0 = rxdp->control_0;
- own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
-
- /* check whether it is not the end */
- if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
-
-		vxge_assert((rxdp)->host_control != 0);
-
- ++ring->cmpl_cnt;
- vxge_hw_channel_dtr_complete(channel);
-
- vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
-
- ring->stats->common_stats.usage_cnt++;
- if (ring->stats->common_stats.usage_max <
- ring->stats->common_stats.usage_cnt)
- ring->stats->common_stats.usage_max =
- ring->stats->common_stats.usage_cnt;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* reset it. since we don't want to return
- * garbage to the driver */
- *rxdh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_ring_handle_tcode - Handle transfer code.
- * @ring: Handle to the ring object used for receive
- * @rxdh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when encounters critical error.
- */
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
-	/* If the t_code is not supported, and the t_code is other
-	 * than 0x5 (an unparseable packet, such as an unknown IPv6
-	 * header), drop it.
-	 */
-
- if (t_code == VXGE_HW_RING_T_CODE_OK ||
- t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
- status = VXGE_HW_OK;
- goto exit;
- }
-
- if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- ring->stats->rxd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
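
A hedged sketch of a completion drain loop using vxge_hw_ring_rxd_next_completed() together with the tcode handler above; the wrapper name is hypothetical, and a real driver would hand the buffer to the network stack where the comment indicates:

/* Illustrative only: draining Rx completions. */
static void example_ring_drain_completions(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) == VXGE_HW_OK &&
		    t_code == VXGE_HW_RING_T_CODE_OK) {
			/* ... hand the received buffer up the stack here ... */
		}
		vxge_hw_ring_rxd_free(ring, rxdh);	/* recycle the descriptor */
	}
}
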
-
-/**
- * __vxge_hw_non_offload_db_post - Post non offload doorbell
- *
- * @fifo: fifohandle
- * @txdl_ptr: The starting location of the TxDL in host memory
- * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
- * @no_snoop: No snoop flags
- *
- * This function posts a non-offload doorbell to doorbell FIFO
- *
- */
-static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
- u64 txdl_ptr, u32 num_txds, u32 no_snoop)
-{
- writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
- VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
- VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
- &fifo->nofl_db->control_0);
-
- writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-}
-
-/**
- * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
- * the fifo
- * @fifoh: Handle to the fifo object used for non offload send
- */
-u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
-{
- return vxge_hw_channel_dtr_count(&fifoh->channel);
-}
-
-/**
- * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
- * with a valid handle.
- * @txdl_priv: Buffer to return the pointer to per txdl space
- *
- * Reserve a single TxDL (that is, fifo descriptor)
- * for subsequent filling-in by the driver
- * and posting on the corresponding channel (@channelh)
- * via vxge_hw_fifo_txdl_post().
- *
- * Note: it is the responsibility of the driver to reserve multiple descriptors
- * for a lengthy (e.g., LSO) transmit operation. A single fifo descriptor
- * carries up to the configured number (fifo.max_frags) of contiguous buffers.
- *
- * Returns: VXGE_HW_OK - success;
- * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifo,
- void **txdlh, void **txdl_priv)
-{
- struct __vxge_hw_channel *channel;
- enum vxge_hw_status status;
- int i;
-
- channel = &fifo->channel;
-
- status = vxge_hw_channel_dtr_alloc(channel, txdlh);
-
- if (status == VXGE_HW_OK) {
- struct vxge_hw_fifo_txd *txdp =
- (struct vxge_hw_fifo_txd *)*txdlh;
- struct __vxge_hw_fifo_txdl_priv *priv;
-
- priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
-
- /* reset the TxDL's private */
- priv->align_dma_offset = 0;
- priv->align_vaddr_start = priv->align_vaddr;
- priv->align_used_frags = 0;
- priv->frags = 0;
- priv->alloc_frags = fifo->config->max_frags;
- priv->next_txdl_priv = NULL;
-
- *txdl_priv = (void *)(size_t)txdp->host_control;
-
- for (i = 0; i < fifo->config->max_frags; i++) {
- txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
- txdp->control_0 = txdp->control_1 = 0;
- }
- }
-
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
- * descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @frag_idx: Index of the data buffer in the caller's scatter-gather list
- * (of buffers).
- * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
- * @size: Size of the data buffer (in bytes).
- *
- * This API is part of the preparation of the transmit descriptor for posting
- * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
- * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
- * All three APIs fill in the fields of the fifo descriptor,
- * in accordance with the Titan specification.
- *
- */
-void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
- void *txdlh, u32 frag_idx,
- dma_addr_t dma_pointer, u32 size)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp, *txdp_last;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
-
- if (frag_idx != 0)
- txdp->control_0 = txdp->control_1 = 0;
- else {
- txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
- txdp->control_1 |= fifo->interrupt_type;
- txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
- fifo->tx_intr_num);
- if (txdl_priv->frags) {
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
- (txdl_priv->frags - 1);
- txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
- VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- }
- }
-
- vxge_assert(frag_idx < txdl_priv->alloc_frags);
-
- txdp->buffer_pointer = (u64)dma_pointer;
- txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
- fifo->stats->total_buffers++;
- txdl_priv->frags++;
-}
-
-/**
- * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
- *
- * Post descriptor on the 'fifo' type channel for transmission.
- * Prior to posting the descriptor should be filled in accordance with
- * Host/Titan interface specification for a given service (LL, etc.).
- *
- */
-void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- struct vxge_hw_fifo_txd *txdp_last;
- struct vxge_hw_fifo_txd *txdp_first;
-
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp_first = txdlh;
-
- txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
- txdp_last->control_0 |=
- VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
- txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
-
- vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
-
- __vxge_hw_non_offload_db_post(fifo,
- (u64)txdl_priv->dma_addr,
- txdl_priv->frags - 1,
- fifo->no_snoop_bits);
-
- fifo->stats->total_posts++;
- fifo->stats->common_stats.usage_cnt++;
- if (fifo->stats->common_stats.usage_max <
- fifo->stats->common_stats.usage_cnt)
- fifo->stats->common_stats.usage_max =
- fifo->stats->common_stats.usage_cnt;
-}
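
A small illustrative transmit sketch, assuming a single pre-mapped fragment; the wrapper name and the dma/len parameters are placeholders, not part of the original API:

/* Illustrative only: one-fragment transmit using the fifo helpers above. */
static enum vxge_hw_status example_fifo_xmit_one(struct __vxge_hw_fifo *fifo,
						 dma_addr_t dma, u32 len)
{
	void *txdlh = NULL;
	void *txdl_priv = NULL;
	enum vxge_hw_status status;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;		/* VXGE_HW_INF_OUT_OF_DESCRIPTORS */

	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);	/* frag 0 */
	vxge_hw_fifo_txdl_post(fifo, txdlh);	/* sets OWN and rings the doorbell */

	return VXGE_HW_OK;
}
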
-
-/**
- * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle. Returned by HW.
- * @t_code: Transfer code, as per Titan User Guide,
- * Transmit Descriptor Format.
- * Returned by HW.
- *
- * Retrieve the _next_ completed descriptor.
- * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
- * driver of new completed descriptors. After that
- * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
- * completions (the very first completion is passed by HW via
- * vxge_hw_channel_callback_f).
- *
- * Implementation-wise, the driver is free to call
- * vxge_hw_fifo_txdl_next_completed either immediately from inside the
- * channel callback, or in a deferred fashion and separate (from HW)
- * context.
- *
- * Non-zero @t_code means failure to process the descriptor.
- * The failure could happen, for instance, when the link is
- * down, in which case Titan completes the descriptor because it
- * is not able to send the data out.
- *
- * For details please refer to Titan User Guide.
- *
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
- * are currently available for processing.
- *
- */
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifo, void **txdlh,
- enum vxge_hw_fifo_tcode *t_code)
-{
- struct __vxge_hw_channel *channel;
- struct vxge_hw_fifo_txd *txdp;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_try_complete(channel, txdlh);
-
- txdp = *txdlh;
- if (txdp == NULL) {
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
- goto exit;
- }
-
- /* check whether host owns it */
- if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
-
- vxge_assert(txdp->host_control != 0);
-
- vxge_hw_channel_dtr_complete(channel);
-
- *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
-
- if (fifo->stats->common_stats.usage_cnt > 0)
- fifo->stats->common_stats.usage_cnt--;
-
- status = VXGE_HW_OK;
- goto exit;
- }
-
- /* no more completions */
- *txdlh = NULL;
- status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_fifo_handle_tcode - Handle transfer code.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- * @t_code: One of the enumerated (and documented in the Titan user guide)
- * "transfer codes".
- *
- * Handle descriptor's transfer code. The latter comes with each completed
- * descriptor.
- *
- * Returns: one of the enum vxge_hw_status{} enumerated types.
- * VXGE_HW_OK - for success.
- * VXGE_HW_ERR_CRITICAL - when encounters critical error.
- */
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
- status = VXGE_HW_ERR_INVALID_TCODE;
- goto exit;
- }
-
- fifo->stats->txd_t_code_err_cnt[t_code]++;
-exit:
- return status;
-}
-
-/**
- * vxge_hw_fifo_txdl_free - Free descriptor.
- * @fifo: Handle to the fifo object used for non offload send
- * @txdlh: Descriptor handle.
- *
- * Free the reserved descriptor. This operation is "symmetrical" to
- * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
- * lifecycle.
- *
- * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
- * be:
- *
- * - reserved (vxge_hw_fifo_txdl_reserve);
- *
- * - posted (vxge_hw_fifo_txdl_post);
- *
- * - completed (vxge_hw_fifo_txdl_next_completed);
- *
- * - and recycled again (vxge_hw_fifo_txdl_free).
- *
- * For alternative state transitions and more details please refer to
- * the design doc.
- *
- */
-void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
-{
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- vxge_hw_channel_dtr_free(channel, txdlh);
-}
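
For symmetry, a sketch of the Tx reclaim side using the three fifo completion helpers above; the wrapper name is hypothetical and skb unmapping/freeing is elided:

/* Illustrative only: reclaiming completed TxDLs. */
static void example_fifo_reclaim(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		/* a non-zero t_code means the descriptor completed with an error */
		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
		/* ... unmap buffers and free the associated skb here ... */
		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
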
-
-/**
- * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be added for this vpath into the list
- * @macaddr_mask: MAC address mask for macaddr
- * @duplicate_mode: Duplicate MAC address add mode. Please see
- * enum vxge_hw_vpath_mac_addr_add_mode{}
- *
- * Adds the given mac address and mac address mask into the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- switch (duplicate_mode) {
- case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
- i = 0;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
- i = 1;
- break;
- case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
- i = 2;
- break;
- default:
- i = 0;
- break;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
- * @vp: Vpath handle.
- * @macaddr: First MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the first mac address entry for this vpath from MAC address table.
- * Return: the first mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
- * @vp: Vpath handle.
- * @macaddr: Next MAC address entry for this vpath in the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Get the next mac address entry for this vpath from MAC address table.
- * Return: the next mac address and mac address mask in the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0, &data1, &data2);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
-
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath from the MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be deleted for this vpath from the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Deletes the given mac address and mac address mask from the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vp,
- u8 *macaddr,
- u8 *macaddr_mask)
-{
- u32 i;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- for (i = 0; i < ETH_ALEN; i++) {
- data1 <<= 8;
- data1 |= (u8)macaddr[i];
-
- data2 <<= 8;
- data2 |= (u8)macaddr_mask[i];
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
- 0,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
- VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
-exit:
- return status;
-}
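
A brief sketch showing how the get/get_next pair above can walk the vpath MAC address table; the wrapper name is hypothetical:

/* Illustrative only: iterating the vpath MAC table. */
static void example_vpath_walk_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macmask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macmask);
	while (status == VXGE_HW_OK) {
		/* ... consume macaddr / macmask here ... */
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr, macmask);
	}
}
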
-
-/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
- * from the vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be deleted for this vpath from the list
- *
- * Deletes the given vlan id from the list for this vpath.
- * see also: vxge_hw_vpath_vid_add
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_set(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Enable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_disable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- /* Enable promiscuous mode for function 0 only */
- if (!(vpath->hldev->access_rights &
- VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
- return VXGE_HW_OK;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
-
- val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_BCAST_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Disable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_enable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
-
- val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
- VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
-
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/*
- * vxge_hw_vpath_bcast_enable - Enable broadcast
- * @vp: Vpath handle.
- *
- * Enable receiving broadcasts.
- */
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
- * @vp: Vpath handle.
- *
- * Enable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK on success.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
- val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
- * @vp: Vpath handle.
- *
- * Disable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- vpath = vp->vpath;
-
- val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
- if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
- val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
- writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
- }
-exit:
- return status;
-}
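
The receive-mode helpers above are typically driven from the driver's rx-mode handling; a hedged sketch follows (the wrapper name and the two boolean flags are placeholders):

/* Illustrative only: choosing vpath receive modes from two flags. */
static void example_vpath_set_rx_mode(struct __vxge_hw_vpath_handle *vp,
				      bool promisc, bool allmulti)
{
	vxge_hw_vpath_bcast_enable(vp);

	if (promisc)
		vxge_hw_vpath_promisc_enable(vp);
	else
		vxge_hw_vpath_promisc_disable(vp);

	if (allmulti)
		vxge_hw_vpath_mcast_enable(vp);
	else
		vxge_hw_vpath_mcast_disable(vp);
}
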
-
-/*
- * vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vp,
- u32 skip_alarms)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
-exit:
- return status;
-}
-
-/**
- * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
- * alarms
- * @vp: Virtual Path handle.
- * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
- * interrupts (can be repeated). If the fifo or ring is not enabled,
- * the MSIX vector for it should be set to 0
- * @alarm_msix_id: MSIX vector for alarm.
- *
- * This API will associate a given MSIX vector numbers with the four TIM
- * interrupts and alarm interrupt.
- */
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
- int alarm_msix_id)
-{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath = vp->vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 vp_id = vp->vpath->vp_id;
-
- val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[0]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (vp_id * 4) + tim_msix_id[1]);
-
- writeq(val64, &vp_reg->interrupt_cfg0);
-
- writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
- &vp_reg->interrupt_cfg2);
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
- 0, 32), &vp_reg->one_shot_vect0_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
- 0, 32), &vp_reg->one_shot_vect1_en);
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
- 0, 32), &vp_reg->one_shot_vect2_en);
- }
-}
-
-/**
- * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function masks the msix interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function clears the msix interrupt for the given msix_id.
- */
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
- else
- __vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
-}
-
-/**
- * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSIX ID
- *
- * The function unmasks the msix interrupt for the given msix_id.
- */
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-}
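
A sketch of per-vpath MSI-X wiring with the helpers above, assuming the (vp_id * 4) vector numbering visible in vxge_hw_vpath_msix_set(); the chosen local indices are placeholders:

/* Illustrative only: associate and unmask the per-vpath vectors. */
static void example_vpath_msix_setup(struct __vxge_hw_vpath_handle *vp)
{
	/* local per-vpath indices; vxge_hw_vpath_msix_set() adds vp_id * 4 */
	int tim_msix_id[4] = { 0, 1, 0, 0 };
	int alarm_msix_id = 2;
	u32 vp_id = vp->vpath->vp_id;

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);

	/* mask/unmask take the absolute vector number; vectors are
	 * typically unmasked once request_irq() has succeeded */
	vxge_hw_vpath_msix_unmask(vp, vp_id * 4 + 0);	/* Tx */
	vxge_hw_vpath_msix_unmask(vp, vp_id * 4 + 1);	/* Rx */
}
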
-
-/**
- * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Mask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_mask_tx_rx()
- */
-void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask0);
- }
-
- val64 = readl(&hldev->common_reg->tim_int_mask1);
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
- * @vp: Virtual Path handle.
- *
- * Unmask Tx and Rx vpath interrupts.
- *
- * See also: vxge_hw_vpath_inta_mask_tx_rx()
- */
-void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
-{
- u64 tim_int_mask0[4] = {[0 ...3] = 0};
- u32 tim_int_mask1[4] = {[0 ...3] = 0};
- u64 val64;
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
- tim_int_mask1, vp->vpath->vp_id);
-
- val64 = readq(&hldev->common_reg->tim_int_mask0);
-
- if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
- writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask0);
- }
-
- if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
- (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
- __vxge_hw_pio_mem_write32_upper(
- (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
- tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
- &hldev->common_reg->tim_int_mask1);
- }
-}
-
-/**
- * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
- * descriptors and process the same.
- * @ring: Handle to the ring object used for receive
- *
- * The function polls the Rx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling is completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- *
- * See also: vxge_hw_vpath_poll_tx()
- */
-enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
-{
- u8 t_code;
- enum vxge_hw_status status = VXGE_HW_OK;
- void *first_rxdh;
- int new_count = 0;
-
- ring->cmpl_cnt = 0;
-
- status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
- if (status == VXGE_HW_OK)
- ring->callback(ring, first_rxdh,
- t_code, ring->channel.userdata);
-
- if (ring->cmpl_cnt != 0) {
- ring->doorbell_cnt += ring->cmpl_cnt;
- if (ring->doorbell_cnt >= ring->rxds_limit) {
- /*
- * Each RxD is of 4 qwords, update the number of
- * qwords replenished
- */
- new_count = (ring->doorbell_cnt * 4);
-
- /* For each block add 4 more qwords */
- ring->total_db_cnt += ring->doorbell_cnt;
- if (ring->total_db_cnt >= ring->rxds_per_block) {
- new_count += 4;
- /* Reset total count */
- ring->total_db_cnt %= ring->rxds_per_block;
- }
- writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
- &ring->vp_reg->prc_rxd_doorbell);
- readl(&ring->common_reg->titan_general_int_status);
- ring->doorbell_cnt = 0;
- }
- }
-
- return status;
-}
-
-/**
- * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
- * @fifo: Handle to the fifo object used for non offload send
- * @skb_ptr: pointer to skb
- * @nr_skb: number of skbs
- * @more: more is coming
- *
- * The function polls the Tx for the completed descriptors and calls
- * the driver via supplied completion callback.
- *
- * Returns: VXGE_HW_OK, if the polling is completed successfully.
- * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
- * descriptors available which are yet to be processed.
- */
-enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
- struct sk_buff ***skb_ptr, int nr_skb,
- int *more)
-{
- enum vxge_hw_fifo_tcode t_code;
- void *first_txdlh;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
- status = vxge_hw_fifo_txdl_next_completed(fifo,
- &first_txdlh, &t_code);
- if (status == VXGE_HW_OK)
- if (fifo->callback(fifo, first_txdlh, t_code,
- channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
- status = VXGE_HW_COMPLETIONS_REMAIN;
-
- return status;
-}
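
A compact sketch of how the two polling entry points above might be driven from a single poll routine; the wrapper name, the fixed skb array size, and the omission of budget handling are simplifications:

/* Illustrative only: driving both polling entry points from one place. */
static void example_vpath_poll(struct __vxge_hw_ring *ring,
			       struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *skbs[16];		/* completed Tx skbs to free */
	struct sk_buff **skb_ptr = skbs;
	int more = 0;

	vxge_hw_vpath_poll_rx(ring);				/* Rx completions */
	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 16, &more);	/* Tx completions */

	/* ... free the skbs collected in skbs[] here ... */
}
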
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
deleted file mode 100644
index ba6f833bb059..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ /dev/null
@@ -1,2290 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_TRAFFIC_H
-#define VXGE_TRAFFIC_H
-
-#include "vxge-reg.h"
-#include "vxge-version.h"
-
-#define VXGE_HW_DTR_MAX_T_CODE 16
-#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
-#define VXGE_HW_MAX_VIRTUAL_PATHS 17
-
-#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
-
-#define VXGE_HW_DEFAULT_32 0xffffffff
-/* frames sizes */
-#define VXGE_HW_HEADER_802_2_SIZE 3
-#define VXGE_HW_HEADER_SNAP_SIZE 5
-#define VXGE_HW_HEADER_VLAN_SIZE 4
-#define VXGE_HW_MAC_HEADER_MAX_SIZE \
- (ETH_HLEN + \
- VXGE_HW_HEADER_802_2_SIZE + \
- VXGE_HW_HEADER_VLAN_SIZE + \
- VXGE_HW_HEADER_SNAP_SIZE)
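
(For reference, with ETH_HLEN equal to 14 this maximum works out to 14 + 3 + 4 + 5 = 26 bytes.)
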
-
-/* 32bit alignments */
-#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
-#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
-#define VXGE_HW_HEADER_802_2_ALIGN 3
-#define VXGE_HW_HEADER_SNAP_ALIGN 1
-
-#define VXGE_HW_L3_CKSUM_OK 0xFFFF
-#define VXGE_HW_L4_CKSUM_OK 0xFFFF
-
-/* Forward declarations */
-struct __vxge_hw_device;
-struct __vxge_hw_vpath_handle;
-struct vxge_hw_vp_config;
-struct __vxge_hw_virtualpath;
-struct __vxge_hw_channel;
-struct __vxge_hw_fifo;
-struct __vxge_hw_ring;
-struct vxge_hw_ring_attr;
-struct vxge_hw_mempool;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/*VXGE_HW_STATUS_H*/
-
-#define VXGE_HW_EVENT_BASE 0
-#define VXGE_LL_EVENT_BASE 100
-
-/**
- * enum vxge_hw_event- Enumerates slow-path HW events.
- * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
- * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
- * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
- * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
- * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
- * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
- * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
- * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
- * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
- * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
- * slot-freeze from the other critical events (e.g. ECC) when it is
- * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
- *
- * enum vxge_hw_event enumerates slow-path HW events.
- *
- * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
- * vxge_uld_link_down_f{}.
- */
-enum vxge_hw_event {
- VXGE_HW_EVENT_UNKNOWN = 0,
- /* HW events */
- VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
- VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
- VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
- VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
- VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
- VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
- VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
- VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
- VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
- VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
- VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
- VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
- VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
- VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
-};
-
-#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
-
-/*
- * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
- * caller.
- */
-struct vxge_hw_mempool_dma {
- dma_addr_t addr;
- struct pci_dev *handle;
- struct pci_dev *acc_handle;
-};
-
-/*
- * vxge_hw_mempool_item_f - Mempool item alloc/free callback
- * @mempoolh: Memory pool handle.
- * @memblock: Address of memory block
- * @memblock_index: Index of memory block
- * @item: Item that gets allocated or freed.
- * @index: Item's index in the memory pool.
- * @is_last: True if this item is the last one in the pool; false otherwise.
- * @userdata: Per-pool user context.
- *
- * Memory pool allocation/deallocation callback.
- */
-
-/*
- * struct vxge_hw_mempool - Memory pool.
- */
-struct vxge_hw_mempool {
-
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-
- void *userdata;
- void **memblocks_arr;
- void **memblocks_priv_arr;
- struct vxge_hw_mempool_dma *memblocks_dma_arr;
- struct __vxge_hw_device *devh;
- u32 memblock_size;
- u32 memblocks_max;
- u32 memblocks_allocated;
- u32 item_size;
- u32 items_max;
- u32 items_initial;
- u32 items_current;
- u32 items_per_memblock;
- void **items_arr;
- u32 items_priv_size;
-};
-
-#define VXGE_HW_MAX_INTR_PER_VP 4
-#define VXGE_HW_VPATH_INTR_TX 0
-#define VXGE_HW_VPATH_INTR_RX 1
-#define VXGE_HW_VPATH_INTR_EINTA 2
-#define VXGE_HW_VPATH_INTR_BMAP 3
-
-#define VXGE_HW_BLOCK_SIZE 4096
-
-/**
- * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
- * @intr_enable: Set to 1, if interrupt is enabled.
- * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
- * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
- * asserted, other interrupt-generating entities will cancel the
- * scheduled timer interrupt.
- * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
- * When asserted, an interrupt will be generated every time the
- * boundary timer expires, even if no traffic has been transmitted
- * on this interrupt.
- * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
- * (Re-) Interrupt Enable: When asserted, an interrupt will be
- * generated the next time the timer expires, even if no traffic has
- * been transmitted on this interrupt. (This will only happen once
- * each time that this value is written to the TIM.) This bit is
- * cleared by H/W at the end of the current-timer-interval when
- * the interrupt is triggered.
- * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
- * @util_sel: Utilization Selector. Selects which of the workload approximations
- * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
- * specified utilization etc.), selects one of
- * the 17 host configured values.
- * 0-Virtual Path 0
- * 1-Virtual Path 1
- * ...
- *		16-Virtual Path 16
- * 17-Legacy Tx network utilization, provided by TPA
- * 18-Legacy Rx network utilization, provided by FAU
- * 19-Average of legacy Rx and Tx utilization calculated from link
- * utilization values.
- * 20-31-Invalid configurations
- * 32-Host utilization for Virtual Path 0
- * 33-Host utilization for Virtual Path 1
- * ...
- *		48-Host utilization for Virtual Path 16
- * 49-Legacy Tx network utilization, provided by TPA
- * 50-Legacy Rx network utilization, provided by FAU
- * 51-Average of legacy Rx and Tx utilization calculated from
- * link utilization values.
- * 52-63-Invalid configurations
- * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
- * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
- * to 1 enables counting of TxD0 returns (signalled by PCC's),
- * towards utilization event count values.
- * @urange_a: Defines the upper limit (in percent) for this utilization range
- * to be active. This range is considered active
- *		if 0 <= UTIL <= URNG_A
- * and the UEC_A field (below) is non-zero.
- * @uec_a: Utilization Event Count A. If this range is active, the adapter will
- * wait until UEC_A events have occurred on the interrupt before
- * generating an interrupt.
- * @urange_b: Link utilization range B.
- * @uec_b: Utilization Event Count B.
- * @urange_c: Link utilization range C.
- * @uec_c: Utilization Event Count C.
- * @urange_d: Link utilization range D.
- * @uec_d: Utilization Event Count D.
- * Traffic Interrupt Controller Module interrupt configuration.
- */
-struct vxge_hw_tim_intr_config {
-
- u32 intr_enable;
-#define VXGE_HW_TIM_INTR_ENABLE 1
-#define VXGE_HW_TIM_INTR_DISABLE 0
-#define VXGE_HW_TIM_INTR_DEFAULT 0
-
- u32 btimer_val;
-#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
-#define VXGE_HW_USE_FLASH_DEFAULT (~0)
-
- u32 timer_ac_en;
-#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
-#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
-
- u32 timer_ci_en;
-#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
-
- u32 timer_ri_en;
-#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
-#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
-
- u32 rtimer_val;
-#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
-
- u32 util_sel;
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
-#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
-#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
-
- u32 ltimer_val;
-#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
-#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
-
- /* Line utilization interrupts */
- u32 urange_a;
-#define VXGE_HW_MIN_TIM_URANGE_A 0
-#define VXGE_HW_MAX_TIM_URANGE_A 100
-
- u32 uec_a;
-#define VXGE_HW_MIN_TIM_UEC_A 0
-#define VXGE_HW_MAX_TIM_UEC_A 65535
-
- u32 urange_b;
-#define VXGE_HW_MIN_TIM_URANGE_B 0
-#define VXGE_HW_MAX_TIM_URANGE_B 100
-
- u32 uec_b;
-#define VXGE_HW_MIN_TIM_UEC_B 0
-#define VXGE_HW_MAX_TIM_UEC_B 65535
-
- u32 urange_c;
-#define VXGE_HW_MIN_TIM_URANGE_C 0
-#define VXGE_HW_MAX_TIM_URANGE_C 100
-
- u32 uec_c;
-#define VXGE_HW_MIN_TIM_UEC_C 0
-#define VXGE_HW_MAX_TIM_UEC_C 65535
-
- u32 uec_d;
-#define VXGE_HW_MIN_TIM_UEC_D 0
-#define VXGE_HW_MAX_TIM_UEC_D 65535
-};
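
A hedged example of filling this structure with values inside the MIN/MAX bounds defined above; every numeric value is a placeholder, not a recommended setting:

/* Illustrative only: placeholder TIM interrupt-moderation values. */
static void example_fill_tim_config(struct vxge_hw_tim_intr_config *tim)
{
	tim->intr_enable = VXGE_HW_TIM_INTR_ENABLE;
	tim->btimer_val  = 1000;	/* units of 272 ns */
	tim->timer_ac_en = VXGE_HW_TIM_TIMER_AC_ENABLE;
	tim->timer_ci_en = VXGE_HW_TIM_TIMER_CI_DISABLE;
	tim->timer_ri_en = VXGE_HW_TIM_TIMER_RI_DISABLE;
	tim->rtimer_val  = 0;
	tim->util_sel    = VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
	tim->ltimer_val  = 0;
	tim->urange_a    = 10;		/* percent */
	tim->uec_a       = 1;
	tim->urange_b    = 20;
	tim->uec_b       = 2;
	tim->urange_c    = 50;
	tim->uec_c       = 4;
	tim->uec_d       = 8;
}
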
-
-#define VXGE_HW_STATS_OP_READ 0
-#define VXGE_HW_STATS_OP_CLEAR_STAT 1
-#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
-#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
-
-#define VXGE_HW_STATS_LOC_AGGR 17
-#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
-
-#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
-#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
- vxge_bVALn(bits, 32, 32)
-
-#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
- vxge_bVALn(bits, 0, 32)
-
-#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
- vxge_bVALn(bits, 32, 32)
-
-/**
- * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
- *
- * @tx_frms: Count of data frames transmitted on this Aggregator on all
- * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * However, does include frames discarded by the Distribution
- * function.
- * @tx_data_octets: Count of data and padding octets of frames transmitted
- * on this Aggregator on all its Aggregation ports. Does not include
- * octets of LACPDUs or Marker PDUs. However, does include octets of
- * frames discarded by the Distribution function.
- * @tx_mcast_frms: Count of data frames transmitted (to a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
- * on all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. However, does include frames discarded by the Distribution
- * function.
- * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
- * that are discarded by the Distribution function. This occurs when
- * conversation are allocated to different ports and have to be
- *	conversations are allocated to different ports and have to be
- *	flushed on old ports.
- * experience transmission errors on its Aggregation ports.
- * @rx_frms: Count of data frames received on this Aggregator on all its
- * Aggregation ports. Does not include LACPDUs or Marker PDUs.
- * Also, does not include frames discarded by the Collection
- * function.
- * @rx_data_octets: Count of data and padding octets of frames received on this
- * Aggregator on all its Aggregation ports. Does not include octets
- * of LACPDUs or Marker PDUs. Also, does not include
- * octets of frames
- * discarded by the Collection function.
- * @rx_mcast_frms: Count of data frames received (from a group destination
- * address other than the broadcast address) on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
- * all its Aggregation ports. Does not include LACPDUs or Marker
- * PDUs. Also, does not include frames discarded by the Collection
- * function.
- * @rx_discarded_frms: Count of data frames received on this Aggregator that are
- * discarded by the Collection function because the Collection
- *	function was disabled on the port on which the frames were received.
- * @rx_errored_frms: Count of data frames received on this Aggregator that are
- * discarded by its Aggregation ports, or are discarded by the
- * Collection function of the Aggregator, or that are discarded by
- * the Aggregator due to detection of an illegal Slow Protocols PDU.
- * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
- * that are discarded by its Aggregation ports due to detection of
- * an unknown Slow Protocols PDU.
- *
- * Per aggregator XMAC RX statistics.
- */
-struct vxge_hw_xmac_aggr_stats {
-/*0x000*/ u64 tx_frms;
-/*0x008*/ u64 tx_data_octets;
-/*0x010*/ u64 tx_mcast_frms;
-/*0x018*/ u64 tx_bcast_frms;
-/*0x020*/ u64 tx_discarded_frms;
-/*0x028*/ u64 tx_errored_frms;
-/*0x030*/ u64 rx_frms;
-/*0x038*/ u64 rx_data_octets;
-/*0x040*/ u64 rx_mcast_frms;
-/*0x048*/ u64 rx_bcast_frms;
-/*0x050*/ u64 rx_discarded_frms;
-/*0x058*/ u64 rx_errored_frms;
-/*0x060*/ u64 rx_unknown_slow_proto_frms;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
- *
- * @tx_ttl_frms: Count of successfully transmitted MAC frames
- * @tx_ttl_octets: Count of total octets of transmitted frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of transmitted frames, including framing characters,
- * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have
- * 8 bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
- * including the preamble octets.
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
- * due to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- *	is still transmitted.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown
- * protocol, such as a new IPv6 extension header, or an unsupported
- * Routing Type. The packet still has a checksum calculated but it
- * may be incorrect.
- * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
- * Since, the only control frames supported by this device are
- * PAUSE frames, this register is a count of all transmitted MAC
- * control frames.
- * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
- * on this Aggregation port.
- * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
- * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
- * the network. Increments because of:
- * 1) An internal processing error
- * (such as an uncorrectable ECC error). 2) A frame parsing error
- * during IP checksum calculation.
- * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
- * Aggregation port.
- * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /T/ (i.e. the terminate character), thus the statistic
- * tracks the number of transmitted Terminate characters.
- * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
- * characters that match a pattern that is programmable through
- * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
- * is set to /S/ (i.e. the start character),
- * thus the statistic tracks
- * the number of transmitted Start characters.
- * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns transmitted at
- * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
- * set to 1, then this stat increments when COLUMN2 is found within
- * 'n' clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
- * to 0, then it means to search anywhere for COLUMN2).
- * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
- * columns that match a pattern that is programmable through register
- * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /I/ (i.e. a column containing all idle characters),
- * thus the statistic tracks the number of transmitted Idle columns.
- * @tx_any_err_frms: Count of transmitted frames containing any error that
- * prevents them from being passed to the network. Increments if
- * there is an ECC while reading the frame out of the transmit
- * buffer. Also increments if the transmit protocol assist (TPA)
- * block determines that the frame should not be sent.
- * @tx_drop_frms: Count of frames that could not be sent for no other reason
- * than internal MAC processing. Increments once whenever the
- * transmit buffer is flushed (due to an ECC error on a memory
- * descriptor).
- * @rx_ttl_frms: Count of total received MAC frames, including frames received
- * with frame-too-long, FCS, or length errors. This stat can be
- * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
- * everything, even "frames" as small as one byte of preamble.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). To determine the
- * total octets of received frames, including framing characters,
- * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
- * otherwise configured, this stat only counts frames that have 8
- * bytes of preamble for each frame). This stat can be configured
- * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
- * even the preamble octets of "frames" as small as one byte of preamble.
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing
- * characters, of offloaded received frames that are passed
- * to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
- * the broadcast group address. Does not include frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to
- * the system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
- * is set to 1, then "more than 1518 octets" becomes "more than 1518
- * (1522 if VLAN-tagged) octets".
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS. In other
- * words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers. Note: If register
- * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
- * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
- * octets".
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 65 and 127
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 128 and 255
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 256 and 511
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding
- * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
- * Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
- * errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_pause_count: Count of the number of pause quanta for which the MAC has
- * been in the paused state. Recall that one pause quantum equates
- * to 512 bit times.
- * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
- * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
- * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
- * this register is a count of all received MAC control frames.
- * Note: This stat may be configured to count all layer 2 errors
- * (i.e. length errors and FCS errors).
- * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
- * not include frames received with frame-too-long or
- * frame-too-short error.
- * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
- * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
- * for VLAN-tagged frames), inclusive, that does not match the
- * number of data octets (including pad) received. Also contains
- * a count of received frames with a length/type field less than
- * 46 (42 for VLAN-tagged frames) and the number of data octets
- * (including pad) received is greater than 46 (42 for VLAN-tagged
- * frames).
- * @rx_out_rng_len_err_frms: Count of received frames with length/type field
- * between 1501 and 1535 decimal, inclusive.
- * @rx_drop_frms: Count of received frames that could not be passed to the host.
- * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
- * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
- * for a list of reasons. Because the RMAC drops one frame at a time,
- * this stat also indicates the number of drop events.
- * @rx_discarded_frms: Count of received frames containing
- * any error that prevents
- * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
- * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
- * reasons.
- * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
- * host. See PORTn_RX_DROP_FRMS for a list of reasons.
- * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
- * port.
- * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
- * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain an unknown
- * PDU. Or frames that contain the Slow Protocols group MAC address,
- * but do not carry the Slow Protocols EtherType.
- * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
- * this Aggregation port.
- * @rx_fcs_discard: Count of received frames that are discarded because the
- * FCS check failed.
- * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
- * that carry the Slow Protocols EtherType, but contain a badly
- * formed PDU. Or frames that carry the Slow Protocols EtherType,
- * but contain an illegal value of Protocol Subtype.
- * @rx_switch_discard: Count of received frames that are discarded by the
- * internal switch because they did not have an entry in the
- * Filtering Database. This includes frames that had an invalid
- * destination MAC address or VLAN ID. It also includes frames that are
- * discarded because they did not satisfy the length requirements
- * of the target VPATH.
- * @rx_len_discard: Count of received frames that are discarded because of an
- * invalid frame length (includes fragments, oversized frames and
- * mismatch between frame length and length/type field). This stat
- * can be configured
- * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
- * @rx_rpa_discard: Count of received frames that were discarded because the
- * receive protocol assist (RPA) discovered an error in the frame
- * or was unable to parse the frame.
- * @rx_l2_mgmt_discard: Count of Layer 2 management frames (e.g. pause frames,
- * Link Aggregation Control Protocol (LACP) frames, etc.) that are
- * discarded.
- * @rx_rts_discard: Count of received frames that are discarded by the receive
- * traffic steering (RTS) logic. Includes those frames discarded
- * because the SSC response contradicted the switch table, because
- * the SSC timed out, or because the target queue could not fit the
- * frame.
- * @rx_trash_discard: Count of received frames that are discarded because
- * receive traffic steering (RTS) steered the frame to the trash
- * queue.
- * @rx_buff_full_discard: Count of received frames that are discarded because
- * internal buffers are full. Includes frames discarded because the
- * RTS logic is waiting for an SSC lookup that has no timeout bound.
- * Also, includes frames that are dropped because the MAC2FAU buffer
- * is nearly full -- this can happen if the external receive buffer
- * is full and the receive path is backing up.
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
- * characters occurring between times of normal data transmission
- * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
- * incremented when either -
- * 1) The Reconciliation Sublayer (RS) is expecting one control
- * character and gets another (i.e. is expecting a Start
- * character, but gets another control character).
- * 2) Start control character is not in lane 0
- * Only increments the count by one for each XGMII column.
- * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
- * during normal data transmission. If the Reconciliation Sublayer
- * (RS) receives a control character, other than a terminate control
- * character, during receipt of data octets then this register is
- * incremented. Also increments if the start frame delimiter is not
- * found in the correct location. Only increments the count by one
- * for each XGMII column.
- * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
- * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
- * Only includes symbol errors that are observed between the XGMII
- * Start Frame Delimiter and End Frame Delimiter, inclusive. And
- * only increments the count by one for each frame.
- * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time.
- * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
- * to /E/ (i.e. the error character), thus the statistic tracks the
- * number of Error characters received at any time.
- * @rx_local_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a local fault.
- * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
- * that match a pattern that is programmable through register
- * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
- * to 4 x /E/ (i.e. a column containing all error characters), thus
- * the statistic tracks the number of Error columns received at any
- * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
- * to 1, then this stat increments when COLUMN2 is found within 'n'
- * clocks after COLUMN1. Here, 'n' is defined by
- * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
- * 0, then it means to search anywhere for COLUMN2).
- * @rx_jettison: Count of received frames that are jettisoned because internal
- * buffers are full.
- * @rx_remote_fault: Maintains a count of the number of times that link
- * transitioned from "up" to "down" due to a remote fault.
- *
- * XMAC Port Statistics.
- */
-struct vxge_hw_xmac_port_stats {
-/*0x000*/ u64 tx_ttl_frms;
-/*0x008*/ u64 tx_ttl_octets;
-/*0x010*/ u64 tx_data_octets;
-/*0x018*/ u64 tx_mcast_frms;
-/*0x020*/ u64 tx_bcast_frms;
-/*0x028*/ u64 tx_ucast_frms;
-/*0x030*/ u64 tx_tagged_frms;
-/*0x038*/ u64 tx_vld_ip;
-/*0x040*/ u64 tx_vld_ip_octets;
-/*0x048*/ u64 tx_icmp;
-/*0x050*/ u64 tx_tcp;
-/*0x058*/ u64 tx_rst_tcp;
-/*0x060*/ u64 tx_udp;
-/*0x068*/ u32 tx_parse_error;
-/*0x06c*/ u32 tx_unknown_protocol;
-/*0x070*/ u64 tx_pause_ctrl_frms;
-/*0x078*/ u32 tx_marker_pdu_frms;
-/*0x07c*/ u32 tx_lacpdu_frms;
-/*0x080*/ u32 tx_drop_ip;
-/*0x084*/ u32 tx_marker_resp_pdu_frms;
-/*0x088*/ u32 tx_xgmii_char2_match;
-/*0x08c*/ u32 tx_xgmii_char1_match;
-/*0x090*/ u32 tx_xgmii_column2_match;
-/*0x094*/ u32 tx_xgmii_column1_match;
-/*0x098*/ u32 unused1;
-/*0x09c*/ u16 tx_any_err_frms;
-/*0x09e*/ u16 tx_drop_frms;
-/*0x0a0*/ u64 rx_ttl_frms;
-/*0x0a8*/ u64 rx_vld_frms;
-/*0x0b0*/ u64 rx_offload_frms;
-/*0x0b8*/ u64 rx_ttl_octets;
-/*0x0c0*/ u64 rx_data_octets;
-/*0x0c8*/ u64 rx_offload_octets;
-/*0x0d0*/ u64 rx_vld_mcast_frms;
-/*0x0d8*/ u64 rx_vld_bcast_frms;
-/*0x0e0*/ u64 rx_accepted_ucast_frms;
-/*0x0e8*/ u64 rx_accepted_nucast_frms;
-/*0x0f0*/ u64 rx_tagged_frms;
-/*0x0f8*/ u64 rx_long_frms;
-/*0x100*/ u64 rx_usized_frms;
-/*0x108*/ u64 rx_osized_frms;
-/*0x110*/ u64 rx_frag_frms;
-/*0x118*/ u64 rx_jabber_frms;
-/*0x120*/ u64 rx_ttl_64_frms;
-/*0x128*/ u64 rx_ttl_65_127_frms;
-/*0x130*/ u64 rx_ttl_128_255_frms;
-/*0x138*/ u64 rx_ttl_256_511_frms;
-/*0x140*/ u64 rx_ttl_512_1023_frms;
-/*0x148*/ u64 rx_ttl_1024_1518_frms;
-/*0x150*/ u64 rx_ttl_1519_4095_frms;
-/*0x158*/ u64 rx_ttl_4096_8191_frms;
-/*0x160*/ u64 rx_ttl_8192_max_frms;
-/*0x168*/ u64 rx_ttl_gt_max_frms;
-/*0x170*/ u64 rx_ip;
-/*0x178*/ u64 rx_accepted_ip;
-/*0x180*/ u64 rx_ip_octets;
-/*0x188*/ u64 rx_err_ip;
-/*0x190*/ u64 rx_icmp;
-/*0x198*/ u64 rx_tcp;
-/*0x1a0*/ u64 rx_udp;
-/*0x1a8*/ u64 rx_err_tcp;
-/*0x1b0*/ u64 rx_pause_count;
-/*0x1b8*/ u64 rx_pause_ctrl_frms;
-/*0x1c0*/ u64 rx_unsup_ctrl_frms;
-/*0x1c8*/ u64 rx_fcs_err_frms;
-/*0x1d0*/ u64 rx_in_rng_len_err_frms;
-/*0x1d8*/ u64 rx_out_rng_len_err_frms;
-/*0x1e0*/ u64 rx_drop_frms;
-/*0x1e8*/ u64 rx_discarded_frms;
-/*0x1f0*/ u64 rx_drop_ip;
-/*0x1f8*/ u64 rx_drop_udp;
-/*0x200*/ u32 rx_marker_pdu_frms;
-/*0x204*/ u32 rx_lacpdu_frms;
-/*0x208*/ u32 rx_unknown_pdu_frms;
-/*0x20c*/ u32 rx_marker_resp_pdu_frms;
-/*0x210*/ u32 rx_fcs_discard;
-/*0x214*/ u32 rx_illegal_pdu_frms;
-/*0x218*/ u32 rx_switch_discard;
-/*0x21c*/ u32 rx_len_discard;
-/*0x220*/ u32 rx_rpa_discard;
-/*0x224*/ u32 rx_l2_mgmt_discard;
-/*0x228*/ u32 rx_rts_discard;
-/*0x22c*/ u32 rx_trash_discard;
-/*0x230*/ u32 rx_buff_full_discard;
-/*0x234*/ u32 rx_red_discard;
-/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
-/*0x23c*/ u32 rx_xgmii_data_err_cnt;
-/*0x240*/ u32 rx_xgmii_char1_match;
-/*0x244*/ u32 rx_xgmii_err_sym;
-/*0x248*/ u32 rx_xgmii_column1_match;
-/*0x24c*/ u32 rx_xgmii_char2_match;
-/*0x250*/ u32 rx_local_fault;
-/*0x254*/ u32 rx_xgmii_column2_match;
-/*0x258*/ u32 rx_jettison;
-/*0x25c*/ u32 rx_remote_fault;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
- *
- * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
- * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
- * not including framing characters (i.e. less framing bits).
- * To determine the total octets of transmitted frames, including
- * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
- * this stat (the device always prepends 8 bytes of preamble for
- * each frame)
- * @tx_data_octets: Count of data and padding octets of successfully transmitted
- * frames.
- * @tx_mcast_frms: Count of successfully transmitted frames to a group address
- * other than the broadcast address.
- * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
- * group address.
- * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
- * Includes discarded frames that are not sent to the network.
- * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
- * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
- * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
- * are passed to the network.
- * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
- * to problems within ICMP.
- * @tx_tcp: Count of transmitted TCP segments. Does not include segments
- * containing retransmitted octets.
- * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
- * @tx_udp: Count of transmitted UDP datagrams.
- * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
- * such as a new IPv6 extension header, or an unsupported Routing
- * Type. The packet still has a checksum calculated but it may be
- * incorrect.
- * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
- * to the network. Increments because of: 1) An internal processing
- * error (such as an uncorrectable ECC error). 2) A frame parsing
- * error during IP checksum calculation.
- * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
- * generally occurs when a packet is corrupt somehow, including
- * packets that have IP version mismatches, invalid Layer 2 control
- * fields, etc. L3/L4 checksums are not offloaded, but the packet
- * is still transmitted.
- * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted TCP segments. Does not include segments containing
- * retransmitted octets.
- * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
- * total number of segments retransmitted. Retransmitted segments
- * that are sourced by the host are counted by the host.
- * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of transmitted IP datagrams that could not be passed to the
- * network.
- *
- * XMAC Vpath TX Statistics.
- */
-struct vxge_hw_xmac_vpath_tx_stats {
- u64 tx_ttl_eth_frms;
- u64 tx_ttl_eth_octets;
- u64 tx_data_octets;
- u64 tx_mcast_frms;
- u64 tx_bcast_frms;
- u64 tx_ucast_frms;
- u64 tx_tagged_frms;
- u64 tx_vld_ip;
- u64 tx_vld_ip_octets;
- u64 tx_icmp;
- u64 tx_tcp;
- u64 tx_rst_tcp;
- u64 tx_udp;
- u32 tx_unknown_protocol;
- u32 tx_lost_ip;
- u32 unused1;
- u32 tx_parse_error;
- u64 tx_tcp_offload;
- u64 tx_retx_tcp_offload;
- u64 tx_lost_ip_offload;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
- *
- * @rx_ttl_eth_frms: Count of successfully received MAC frames.
- * @rx_vld_frms: Count of successfully received MAC frames. Does not include
- * frames received with frame-too-long, FCS, or length errors.
- * @rx_offload_frms: Count of offloaded received frames that are passed to
- * the host.
- * @rx_ttl_eth_octets: Count of total octets of received frames, not including
- * framing characters (i.e. less framing bits). Only counts octets
- * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
- * before FCS. To determine the total octets of received frames,
- * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
- * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
- * that have the required 8 bytes of preamble).
- * @rx_data_octets: Count of data and padding octets of successfully received
- * frames. Does not include frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_offload_octets: Count of total octets, not including framing characters,
- * of offloaded received frames that are passed to the host.
- * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
- * nonbroadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
- * broadcast group address. Does not include frames received with
- * frame-too-long, FCS, or length errors.
- * @rx_accepted_ucast_frms: Count of successfully received frames containing
- * a unicast address. Only includes frames that are passed to the
- * system.
- * @rx_accepted_nucast_frms: Count of successfully received frames containing
- * a non-unicast (broadcast or multicast) address. Only includes
- * frames that are passed to the system. Could include, for instance,
- * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
- * register is set to pass FCS-errored frames to the host.
- * @rx_tagged_frms: Count of received frames containing a VLAN tag.
- * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
- * + 18 bytes (+ 22 bytes if VLAN-tagged).
- * @rx_usized_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets, that are otherwise well-formed.
- * In other words, counts runts.
- * @rx_osized_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets, that are otherwise
- * well-formed.
- * @rx_frag_frms: Count of received frames of length (including FCS, but not
- * framing bits) less than 64 octets that had bad FCS.
- * In other words, counts fragments.
- * @rx_jabber_frms: Count of received frames of length (including FCS, but not
- * framing bits) more than 1518 octets that had bad FCS. In other
- * words, counts jabbers.
- * @rx_ttl_64_frms: Count of total received MAC frames with length (including
- * FCS, but not framing bits) of exactly 64 octets. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ttl_65_127_frms: Count of total received MAC frames
- * with length (including
- * FCS, but not framing bits) of between 65 and 127 octets inclusive.
- * Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_128_255_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 128 and 255 octets
- * inclusive. Includes frames received with frame-too-long, FCS,
- * or length errors.
- * @rx_ttl_256_511_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits)
- * of between 256 and 511 octets
- * inclusive. Includes frames received with frame-too-long, FCS, or
- * length errors.
- * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 512 and 1023
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1024 and 1518
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 1519 and 4095
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 4096 and 8191
- * octets inclusive. Includes frames received with frame-too-long,
- * FCS, or length errors.
- * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) of between 8192 and
- * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
- * with frame-too-long, FCS, or length errors.
- * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
- * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
- * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
- * received with frame-too-long, FCS, or length errors.
- * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
- * @rx_accepted_ip: Count of received IP datagrams that
- * are passed to the system.
- * @rx_ip_octets: Count of number of octets in received IP datagrams.
- * Includes errored IP datagrams.
- * @rx_err_ip: Count of received IP datagrams containing errors. For example,
- * bad IP checksum.
- * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
- * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
- * Note: This stat contains a count of all received TCP segments,
- * regardless of whether or not they pertain to an established
- * connection.
- * @rx_udp: Count of received UDP datagrams.
- * @rx_err_tcp: Count of received TCP segments containing errors. For example,
- * bad TCP checksum.
- * @rx_lost_frms: Count of received frames that could not be passed to the host.
- * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
- * for a list of reasons.
- * @rx_lost_ip: Count of received IP datagrams that could not be passed to
- * the host. See RX_LOST_FRMS for a list of reasons.
- * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
- * of received IP datagrams that could not be passed to the host.
- * See RX_LOST_FRMS for a list of reasons.
- * @rx_various_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_sleep_discard: Count of received frames that are discarded because the
- * target VPATH is asleep (a Wake-on-LAN magic packet can be used
- * to awaken the VPATH).
- * @rx_red_discard: Count of received frames that are discarded because of RED
- * (Random Early Discard).
- * @rx_queue_full_discard: Count of received frames that are discarded because
- * the target receive queue is full.
- * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
- *
- * XMAC Vpath RX Statistics.
- */
-struct vxge_hw_xmac_vpath_rx_stats {
- u64 rx_ttl_eth_frms;
- u64 rx_vld_frms;
- u64 rx_offload_frms;
- u64 rx_ttl_eth_octets;
- u64 rx_data_octets;
- u64 rx_offload_octets;
- u64 rx_vld_mcast_frms;
- u64 rx_vld_bcast_frms;
- u64 rx_accepted_ucast_frms;
- u64 rx_accepted_nucast_frms;
- u64 rx_tagged_frms;
- u64 rx_long_frms;
- u64 rx_usized_frms;
- u64 rx_osized_frms;
- u64 rx_frag_frms;
- u64 rx_jabber_frms;
- u64 rx_ttl_64_frms;
- u64 rx_ttl_65_127_frms;
- u64 rx_ttl_128_255_frms;
- u64 rx_ttl_256_511_frms;
- u64 rx_ttl_512_1023_frms;
- u64 rx_ttl_1024_1518_frms;
- u64 rx_ttl_1519_4095_frms;
- u64 rx_ttl_4096_8191_frms;
- u64 rx_ttl_8192_max_frms;
- u64 rx_ttl_gt_max_frms;
- u64 rx_ip;
- u64 rx_accepted_ip;
- u64 rx_ip_octets;
- u64 rx_err_ip;
- u64 rx_icmp;
- u64 rx_tcp;
- u64 rx_udp;
- u64 rx_err_tcp;
- u64 rx_lost_frms;
- u64 rx_lost_ip;
- u64 rx_lost_ip_offload;
- u16 rx_various_discard;
- u16 rx_sleep_discard;
- u16 rx_red_discard;
- u16 rx_queue_full_discard;
- u64 rx_mpa_ok_frms;
-} __packed;
-
-/**
- * struct vxge_hw_xmac_stats - XMAC Statistics
- *
- * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
- * @port_stats: Statistics on ports (wire 0, wire 1, LAG)
- * @vpath_tx_stats: Per vpath XMAC TX stats
- * @vpath_rx_stats: Per vpath XMAC RX stats
- *
- * XMAC Statistics.
- */
-struct vxge_hw_xmac_stats {
- struct vxge_hw_xmac_aggr_stats
- aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
- struct vxge_hw_xmac_port_stats
- port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
- struct vxge_hw_xmac_vpath_tx_stats
- vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_xmac_vpath_rx_stats
- vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
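The container above is just fixed-size arrays indexed by port and vpath
number, so a caller that has filled it in (for instance via
vxge_hw_device_xmac_stats_get(), declared further below) can walk the arrays
directly. A minimal sketch, assuming the vxge declarations in this header are
in scope; the helper name is illustrative only:

/* Illustrative only: total transmitted frames across all wire/LAG ports
 * of a filled-in struct vxge_hw_xmac_stats.
 */
static u64 example_total_tx_frames(const struct vxge_hw_xmac_stats *xs)
{
	u64 total = 0;
	u32 i;

	/* port_stats[] has one entry per wire port plus the LAG port */
	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID + 1; i++)
		total += xs->port_stats[i].tx_ttl_frms;

	return total;
}
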
-/**
- * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
- * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
- * for the given VPATH
- * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
- * @ini_num_cpl_rcvd: The number of PCI read completions received by the
- * PIC block
- * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
- * block to the host
- * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
- * the PIC block
- * @wrcrdtarb_xoff: TBD
- * @rdcrdtarb_xoff: TBD
- * @vpath_genstats_count0: TBD
- * @vpath_genstats_count1: TBD
- * @vpath_genstats_count2: TBD
- * @vpath_genstats_count3: TBD
- * @vpath_genstats_count4: TBD
- * @vpath_genstats_count5: TBD
- * @tx_stats: Transmit stats
- * @rx_stats: Receive stats
- * @prog_event_vnum1: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
- * @prog_event_vnum0: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
- * @prog_event_vnum3: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
- * @prog_event_vnum2: Programmable statistic. Increments when internal logic
- * detects a certain event. See register
- * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
- * @rx_multi_cast_frame_discard: TBD
- * @rx_frm_transferred: TBD
- * @rxd_returned: TBD
- * @rx_mpa_len_fail_frms: Count of received frames
- * that fail the MPA length check
- * @rx_mpa_mrk_fail_frms: Count of received frames
- * that fail the MPA marker check
- * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
- * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
- * frame buffer (and subsequently to the host).
- * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
- * because the VPATH is in reset
- * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
- * whenever the received frame matches the VPATH's Wake-on-LAN
- * signature(s) CRC.
- * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
- * because the VPATH is in reset. Includes frames that are discarded
- * because the current VPIN does not match the VPIN of the frame.
- *
- * Titan vpath hardware statistics.
- */
-struct vxge_hw_vpath_stats_hw_info {
-/*0x000*/ u32 ini_num_mwr_sent;
-/*0x004*/ u32 unused1;
-/*0x008*/ u32 ini_num_mrd_sent;
-/*0x00c*/ u32 unused2;
-/*0x010*/ u32 ini_num_cpl_rcvd;
-/*0x014*/ u32 unused3;
-/*0x018*/ u64 ini_num_mwr_byte_sent;
-/*0x020*/ u64 ini_num_cpl_byte_rcvd;
-/*0x028*/ u32 wrcrdtarb_xoff;
-/*0x02c*/ u32 unused4;
-/*0x030*/ u32 rdcrdtarb_xoff;
-/*0x034*/ u32 unused5;
-/*0x038*/ u32 vpath_genstats_count0;
-/*0x03c*/ u32 vpath_genstats_count1;
-/*0x040*/ u32 vpath_genstats_count2;
-/*0x044*/ u32 vpath_genstats_count3;
-/*0x048*/ u32 vpath_genstats_count4;
-/*0x04c*/ u32 unused6;
-/*0x050*/ u32 vpath_genstats_count5;
-/*0x054*/ u32 unused7;
-/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
-/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
-/*0x220*/ u64 unused9;
-/*0x228*/ u32 prog_event_vnum1;
-/*0x22c*/ u32 prog_event_vnum0;
-/*0x230*/ u32 prog_event_vnum3;
-/*0x234*/ u32 prog_event_vnum2;
-/*0x238*/ u16 rx_multi_cast_frame_discard;
-/*0x23a*/ u8 unused10[6];
-/*0x240*/ u32 rx_frm_transferred;
-/*0x244*/ u32 unused11;
-/*0x248*/ u16 rxd_returned;
-/*0x24a*/ u8 unused12[6];
-/*0x252*/ u16 rx_mpa_len_fail_frms;
-/*0x254*/ u16 rx_mpa_mrk_fail_frms;
-/*0x256*/ u16 rx_mpa_crc_fail_frms;
-/*0x258*/ u16 rx_permitted_frms;
-/*0x25c*/ u64 rx_vp_reset_discarded_frms;
-/*0x25e*/ u64 rx_wol_frms;
-/*0x260*/ u64 tx_vp_reset_discarded_frms;
-} __packed;
-
-/**
- * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
- * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
- * by the adapter that were discarded because the VPATH is out of service
- * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
- * adapter that were discarded because the VPATH is out of service
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
- * the posted header credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
- * the posted data credits for upstream PCI writes were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
- * the non-posted header credits for upstream PCI reads were depleted
- * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
- * the adapter that were discarded because the VPATH instance number does
- * not match
- * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
- * by the adapter that were discarded because the VPATH instance number
- * does not match
- * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
- * to the GENSTATS0_CFG for information on configuring this statistic
- * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
- * to the GENSTATS1_CFG for information on configuring this statistic
- * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
- * to the GENSTATS2_CFG for information on configuring this statistic
- * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
- * to the GENSTATS3_CFG for information on configuring this statistic
- * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
- * to the GENSTATS4_CFG for information on configuring this statistic
- * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
- * to the GENSTATS5_CFG for information on configuring this statistic
- * @pci.rstdrop_cpl 0x01c8 4
- * @pci.rstdrop_msg 0x01cc 4
- * @pci.rstdrop_client1 0x01d0 4
- * @pci.rstdrop_client0 0x01d4 4
- * @pci.rstdrop_client2 0x01d8 4
- * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
- * header credits were depleted
- * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
- * header credits were depleted
- * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
- * header credits were depleted
- * @pci.depl_cplh[vplane1] 0x01ea 2
- * @pci.depl_nph[vplane1] 0x01ec 2
- * @pci.depl_ph[vplane1] 0x01ee 2
- * @pci.depl_cplh[vplane2] 0x01f2 2
- * @pci.depl_nph[vplane2] 0x01f4 2
- * @pci.depl_ph[vplane2] 0x01f6 2
- * @pci.depl_cplh[vplane3] 0x01fa 2
- * @pci.depl_nph[vplane3] 0x01fc 2
- * @pci.depl_ph[vplane3] 0x01fe 2
- * @pci.depl_cplh[vplane4] 0x0202 2
- * @pci.depl_nph[vplane4] 0x0204 2
- * @pci.depl_ph[vplane4] 0x0206 2
- * @pci.depl_cplh[vplane5] 0x020a 2
- * @pci.depl_nph[vplane5] 0x020c 2
- * @pci.depl_ph[vplane5] 0x020e 2
- * @pci.depl_cplh[vplane6] 0x0212 2
- * @pci.depl_nph[vplane6] 0x0214 2
- * @pci.depl_ph[vplane6] 0x0216 2
- * @pci.depl_cplh[vplane7] 0x021a 2
- * @pci.depl_nph[vplane7] 0x021c 2
- * @pci.depl_ph[vplane7] 0x021e 2
- * @pci.depl_cplh[vplane8] 0x0222 2
- * @pci.depl_nph[vplane8] 0x0224 2
- * @pci.depl_ph[vplane8] 0x0226 2
- * @pci.depl_cplh[vplane9] 0x022a 2
- * @pci.depl_nph[vplane9] 0x022c 2
- * @pci.depl_ph[vplane9] 0x022e 2
- * @pci.depl_cplh[vplane10] 0x0232 2
- * @pci.depl_nph[vplane10] 0x0234 2
- * @pci.depl_ph[vplane10] 0x0236 2
- * @pci.depl_cplh[vplane11] 0x023a 2
- * @pci.depl_nph[vplane11] 0x023c 2
- * @pci.depl_ph[vplane11] 0x023e 2
- * @pci.depl_cplh[vplane12] 0x0242 2
- * @pci.depl_nph[vplane12] 0x0244 2
- * @pci.depl_ph[vplane12] 0x0246 2
- * @pci.depl_cplh[vplane13] 0x024a 2
- * @pci.depl_nph[vplane13] 0x024c 2
- * @pci.depl_ph[vplane13] 0x024e 2
- * @pci.depl_cplh[vplane14] 0x0252 2
- * @pci.depl_nph[vplane14] 0x0254 2
- * @pci.depl_ph[vplane14] 0x0256 2
- * @pci.depl_cplh[vplane15] 0x025a 2
- * @pci.depl_nph[vplane15] 0x025c 2
- * @pci.depl_ph[vplane15] 0x025e 2
- * @pci.depl_cplh[vplane16] 0x0262 2
- * @pci.depl_nph[vplane16] 0x0264 2
- * @pci.depl_ph[vplane16] 0x0266 2
- * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
- * credits were depleted
- * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
- * credits were depleted
- * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
- * credits were depleted
- * @pci.depl_cpld[vplane1] 0x0272 2
- * @pci.depl_npd[vplane1] 0x0274 2
- * @pci.depl_pd[vplane1] 0x0276 2
- * @pci.depl_cpld[vplane2] 0x027a 2
- * @pci.depl_npd[vplane2] 0x027c 2
- * @pci.depl_pd[vplane2] 0x027e 2
- * @pci.depl_cpld[vplane3] 0x0282 2
- * @pci.depl_npd[vplane3] 0x0284 2
- * @pci.depl_pd[vplane3] 0x0286 2
- * @pci.depl_cpld[vplane4] 0x028a 2
- * @pci.depl_npd[vplane4] 0x028c 2
- * @pci.depl_pd[vplane4] 0x028e 2
- * @pci.depl_cpld[vplane5] 0x0292 2
- * @pci.depl_npd[vplane5] 0x0294 2
- * @pci.depl_pd[vplane5] 0x0296 2
- * @pci.depl_cpld[vplane6] 0x029a 2
- * @pci.depl_npd[vplane6] 0x029c 2
- * @pci.depl_pd[vplane6] 0x029e 2
- * @pci.depl_cpld[vplane7] 0x02a2 2
- * @pci.depl_npd[vplane7] 0x02a4 2
- * @pci.depl_pd[vplane7] 0x02a6 2
- * @pci.depl_cpld[vplane8] 0x02aa 2
- * @pci.depl_npd[vplane8] 0x02ac 2
- * @pci.depl_pd[vplane8] 0x02ae 2
- * @pci.depl_cpld[vplane9] 0x02b2 2
- * @pci.depl_npd[vplane9] 0x02b4 2
- * @pci.depl_pd[vplane9] 0x02b6 2
- * @pci.depl_cpld[vplane10] 0x02ba 2
- * @pci.depl_npd[vplane10] 0x02bc 2
- * @pci.depl_pd[vplane10] 0x02be 2
- * @pci.depl_cpld[vplane11] 0x02c2 2
- * @pci.depl_npd[vplane11] 0x02c4 2
- * @pci.depl_pd[vplane11] 0x02c6 2
- * @pci.depl_cpld[vplane12] 0x02ca 2
- * @pci.depl_npd[vplane12] 0x02cc 2
- * @pci.depl_pd[vplane12] 0x02ce 2
- * @pci.depl_cpld[vplane13] 0x02d2 2
- * @pci.depl_npd[vplane13] 0x02d4 2
- * @pci.depl_pd[vplane13] 0x02d6 2
- * @pci.depl_cpld[vplane14] 0x02da 2
- * @pci.depl_npd[vplane14] 0x02dc 2
- * @pci.depl_pd[vplane14] 0x02de 2
- * @pci.depl_cpld[vplane15] 0x02e2 2
- * @pci.depl_npd[vplane15] 0x02e4 2
- * @pci.depl_pd[vplane15] 0x02e6 2
- * @pci.depl_cpld[vplane16] 0x02ea 2
- * @pci.depl_npd[vplane16] 0x02ec 2
- * @pci.depl_pd[vplane16] 0x02ee 2
- * @xgmac_port[3];
- * @xgmac_aggr[2];
- * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
- * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
- * Increments when internal logic detects a certain event. See register
- * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
- * @xgmac.orp_lro_events 0x0af8 8
- * @xgmac.orp_bs_events 0x0b00 8
- * @xgmac.orp_iwarp_events 0x0b08 8
- * @xgmac.tx_permitted_frms 0x0b14 4
- * @xgmac.port2_tx_any_frms 0x0b1d 1
- * @xgmac.port1_tx_any_frms 0x0b1e 1
- * @xgmac.port0_tx_any_frms 0x0b1f 1
- * @xgmac.port2_rx_any_frms 0x0b25 1
- * @xgmac.port1_rx_any_frms 0x0b26 1
- * @xgmac.port0_rx_any_frms 0x0b27 1
- *
- * Titan mrpcim hardware statistics.
- */
-struct vxge_hw_device_stats_mrpcim_info {
-/*0x0000*/ u32 pic_ini_rd_drop;
-/*0x0004*/ u32 pic_ini_wr_drop;
-/*0x0008*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
- /*0x0004*/ u32 unused1;
- } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
-/*0x0090*/ struct {
- /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
- /*0x0004*/ u32 unused2;
- } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
-/*0x0118*/ struct {
- /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
- /*0x0004*/ u32 unused3;
- } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
-/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
-/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
-/*0x01a8*/ u32 pic_genstats_count0;
-/*0x01ac*/ u32 pic_genstats_count1;
-/*0x01b0*/ u32 pic_genstats_count2;
-/*0x01b4*/ u32 pic_genstats_count3;
-/*0x01b8*/ u32 pic_genstats_count4;
-/*0x01bc*/ u32 unused4;
-/*0x01c0*/ u32 pic_genstats_count5;
-/*0x01c4*/ u32 unused5;
-/*0x01c8*/ u32 pci_rstdrop_cpl;
-/*0x01cc*/ u32 pci_rstdrop_msg;
-/*0x01d0*/ u32 pci_rstdrop_client1;
-/*0x01d4*/ u32 pci_rstdrop_client0;
-/*0x01d8*/ u32 pci_rstdrop_client2;
-/*0x01dc*/ u32 unused6;
-/*0x01e0*/ struct {
- /*0x0000*/ u16 unused7;
- /*0x0002*/ u16 pci_depl_cplh;
- /*0x0004*/ u16 pci_depl_nph;
- /*0x0006*/ u16 pci_depl_ph;
- } pci_depl_h_vplane[17];
-/*0x0268*/ struct {
- /*0x0000*/ u16 unused8;
- /*0x0002*/ u16 pci_depl_cpld;
- /*0x0004*/ u16 pci_depl_npd;
- /*0x0006*/ u16 pci_depl_pd;
- } pci_depl_d_vplane[17];
-/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
-/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
-/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
-/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
-/*0x0af0*/ u64 unused7;
-/*0x0af8*/ u64 unused8;
-/*0x0b00*/ u64 unused9;
-/*0x0b08*/ u64 unused10;
-/*0x0b10*/ u32 unused11;
-/*0x0b14*/ u32 xgmac_tx_permitted_frms;
-/*0x0b18*/ u32 unused12;
-/*0x0b1c*/ u8 unused13;
-/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
-/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
-/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
-/*0x0b20*/ u32 unused14;
-/*0x0b24*/ u8 unused15;
-/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
-/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
-/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
-} __packed;
-
-/**
- * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
- * @vpath_info: VPath statistics
- * @vpath_info_sav: Vpath statistics saved
- *
- * Titan hardware statistics.
- */
-struct vxge_hw_device_stats_hw_info {
- struct vxge_hw_vpath_stats_hw_info
- *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
- struct vxge_hw_vpath_stats_hw_info
- vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_common_info - HW common
- * statistics for queues.
- * @full_cnt: Number of times the queue was full
- * @usage_cnt: usage count.
- * @usage_max: Maximum usage
- * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
- * @total_compl_cnt: Total descriptor completion count.
- *
- * Hw queue counters
- * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_common_info {
- u32 full_cnt;
- u32 usage_cnt;
- u32 usage_max;
- u32 reserve_free_swaps_cnt;
- u32 total_compl_cnt;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
- * @common_stats: Common counters for all queues
- * @total_posts: Total number of postings on the queue.
- * @total_buffers: Total number of buffers posted.
- * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
- * (index) in this array reflects the transfer code type, for instance
- * 0xA - "loss of link".
- * Value txd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW fifo counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_ring_info{},
- */
-struct vxge_hw_vpath_stats_sw_fifo_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 total_posts;
- u32 total_buffers;
- u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-};
-
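Because the index of @txd_t_code_err_cnt is the transfer code itself, updating
the counter on a transmit completion is a single bounded array increment. A
minimal sketch, assuming the declarations above are in scope; the helper name
is illustrative only:

/* Illustrative helper: count one transmit completion transfer code.
 * t_code indexes the array directly, e.g. 0xA for "loss of link".
 */
static void example_count_tx_t_code(struct vxge_hw_vpath_stats_sw_fifo_info *fifo,
				    u32 t_code)
{
	if (t_code < VXGE_HW_DTR_MAX_T_CODE)
		fifo->txd_t_code_err_cnt[t_code]++;
}
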
-/**
- * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
- * @common_stats: Common counters for all queues
- * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
- * (index) in this array reflects the transfer code type,
- * for instance
- * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
- * Value rxd_t_code_err_cnt[i] reflects the
- * number of times the corresponding transfer code was encountered.
- *
- * HW ring counters
- * See also: struct vxge_hw_vpath_stats_sw_common_info{},
- * struct vxge_hw_vpath_stats_sw_fifo_info{},
- */
-struct vxge_hw_vpath_stats_sw_ring_info {
- struct vxge_hw_vpath_stats_sw_common_info common_stats;
- u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
-
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
- * @unknown_alarms:
- * @network_sustained_fault:
- * @network_sustained_ok:
- * @kdfcctl_fifo0_overwrite:
- * @kdfcctl_fifo0_poison:
- * @kdfcctl_fifo0_dma_error:
- * @dblgen_fifo0_overflow:
- * @statsb_pif_chain_error:
- * @statsb_drop_timeout:
- * @target_illegal_access:
- * @ini_serr_det:
- * @prc_ring_bumps:
- * @prc_rxdcm_sc_err:
- * @prc_rxdcm_sc_abort:
- * @prc_quanta_size_err:
- *
- * HW vpath error statistics
- */
-struct vxge_hw_vpath_stats_sw_err {
- u32 unknown_alarms;
- u32 network_sustained_fault;
- u32 network_sustained_ok;
- u32 kdfcctl_fifo0_overwrite;
- u32 kdfcctl_fifo0_poison;
- u32 kdfcctl_fifo0_dma_error;
- u32 dblgen_fifo0_overflow;
- u32 statsb_pif_chain_error;
- u32 statsb_drop_timeout;
- u32 target_illegal_access;
- u32 ini_serr_det;
- u32 prc_ring_bumps;
- u32 prc_rxdcm_sc_err;
- u32 prc_rxdcm_sc_abort;
- u32 prc_quanta_size_err;
-};
-
-/**
- * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
- * @soft_reset_cnt: Number of times soft reset is done on this vpath.
- * @error_stats: error counters for the vpath
- * @ring_stats: counters for ring belonging to the vpath
- * @fifo_stats: counters for fifo belonging to the vpath
- *
- * HW vpath sw statistics
- * See also: struct vxge_hw_device_info{}.
- */
-struct vxge_hw_vpath_stats_sw_info {
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_err error_stats;
- struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
- struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
-};
-
-/**
- * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
- *
- * @not_traffic_intr_cnt: Number of times the host was interrupted
- * without new completions.
- * "Non-traffic interrupt counter".
- * @traffic_intr_cnt: Number of traffic interrupts for the device.
- * @total_intr_cnt: Total number of traffic interrupts for the device.
- * @total_intr_cnt == @traffic_intr_cnt +
- * @not_traffic_intr_cnt
- * @soft_reset_cnt: Number of times soft reset is done on this device.
- * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
- * HW per-device statistics.
- */
-struct vxge_hw_device_stats_sw_info {
- u32 not_traffic_intr_cnt;
- u32 traffic_intr_cnt;
- u32 total_intr_cnt;
- u32 soft_reset_cnt;
- struct vxge_hw_vpath_stats_sw_info
- vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
-};
-
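As documented above, @total_intr_cnt is simply the sum of the traffic and
non-traffic interrupt counters, so the relationship can be verified with a
one-line comparison. A minimal sketch (the function name is illustrative, not
part of this header):

/* Illustrative check of the documented invariant:
 * total_intr_cnt == traffic_intr_cnt + not_traffic_intr_cnt
 */
static int example_intr_counts_consistent(const struct vxge_hw_device_stats_sw_info *sw)
{
	return sw->total_intr_cnt ==
	       sw->traffic_intr_cnt + sw->not_traffic_intr_cnt;
}
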
-/**
- * struct vxge_hw_device_stats_sw_err - HW device error statistics.
- * @vpath_alarms: Number of vpath alarms
- *
- * HW Device error stats
- */
-struct vxge_hw_device_stats_sw_err {
- u32 vpath_alarms;
-};
-
-/**
- * struct vxge_hw_device_stats - Contains HW per-device statistics,
- * including hw.
- * @devh: HW device handle.
- * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
- * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
- * space.
- * @hw_info_dma_acch: One more DMA handle used subsequently to free the
- * DMA object. Note that this and the previous handle have
- * physical meaning for Solaris; on Windows and Linux the
- * corresponding value will be simply pointer to PCI device.
- *
- * @hw_dev_info_stats: Titan statistics maintained by the hardware.
- * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
- * of completions per interrupt.
- * @sw_dev_err_stats: HW's "soft" device error statistics.
- *
- * Structure-container of HW per-device statistics. Note that per-channel
- * statistics are kept in separate structures under HW's fifo and ring
- * channels.
- */
-struct vxge_hw_device_stats {
- /* handles */
- struct __vxge_hw_device *devh;
-
- /* HW device hardware statistics */
- struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
-
- /* HW device "soft" stats */
- struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
- struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
-
-};
-
-enum vxge_hw_status vxge_hw_device_hw_stats_enable(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_hw_info *hw_stats);
-
-enum vxge_hw_status vxge_hw_driver_stats_get(
- struct __vxge_hw_device *devh,
- struct vxge_hw_device_stats_sw_info *sw_stats);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status
-vxge_hw_mrpcim_stats_access(
- struct __vxge_hw_device *devh,
- u32 operation,
- u32 location,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
- struct vxge_hw_xmac_stats *xmac_stats);
-
-/**
- * enum vxge_hw_mgmt_reg_type - Register types.
- *
- * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
- * @vxge_hw_mgmt_reg_type_toc: TOC Registers
- * @vxge_hw_mgmt_reg_type_common: Common Registers
- * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
- * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
- * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
- * @vxge_hw_mgmt_reg_type_vpath: vpath registers
- *
- * Register type enumeration
- */
-enum vxge_hw_mgmt_reg_type {
- vxge_hw_mgmt_reg_type_legacy = 0,
- vxge_hw_mgmt_reg_type_toc = 1,
- vxge_hw_mgmt_reg_type_common = 2,
- vxge_hw_mgmt_reg_type_mrpcim = 3,
- vxge_hw_mgmt_reg_type_srpcim = 4,
- vxge_hw_mgmt_reg_type_vpmgmt = 5,
- vxge_hw_mgmt_reg_type_vpath = 6
-};
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 *value);
-
-enum vxge_hw_status
-vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
- enum vxge_hw_mgmt_reg_type type,
- u32 index,
- u32 offset,
- u64 value);
-
-/**
- * enum vxge_hw_rxd_state - Descriptor (RXD) state.
- * @VXGE_HW_RXD_STATE_NONE: Invalid state.
- * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
- * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
- * device.
- * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
- * filling-in and posting later.
- *
- * Titan/HW descriptor states.
- *
- */
-enum vxge_hw_rxd_state {
- VXGE_HW_RXD_STATE_NONE = 0,
- VXGE_HW_RXD_STATE_AVAIL = 1,
- VXGE_HW_RXD_STATE_POSTED = 2,
- VXGE_HW_RXD_STATE_FREED = 3
-};
-
-/**
- * struct vxge_hw_ring_rxd_info - Extended information associated with a
- * completed ring descriptor.
- * @syn_flag: SYN flag
- * @is_icmp: Is ICMP
- * @fast_path_eligible: Fast Path Eligible flag
- * @l3_cksum_valid: Set if the L3 checksum is valid.
- * @l3_cksum: Result of IP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L3_CKSUM_OK would mean that
- * the checksum is correct, otherwise - the datagram is
- * corrupted.
- * @l4_cksum_valid: Set if the L4 checksum is valid.
- * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
- * This field containing VXGE_HW_L4_CKSUM_OK would mean that
- * the checksum is correct. Otherwise - the packet is
- * corrupted.
- * @frame: Zero or more of enum vxge_hw_frame_type flags.
- * See enum vxge_hw_frame_type{}.
- * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
- *	various higher-layer protocols, including (but not restricted to)
- * TCP and UDP. See enum vxge_hw_frame_proto{}.
- * @is_vlan: If vlan tag is valid
- * @vlan: VLAN tag extracted from the received frame.
- * @rth_bucket: RTH bucket
- * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
- * has a matching entry in the Indirection table.
- * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
- * has a matching entry in the Socket Pair Direct Match table.
- * @rth_hash_type: RTH hash code of the function used to calculate the hash.
- * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Titan
- * hardware if RTH is enabled.
- */
-struct vxge_hw_ring_rxd_info {
- u32 syn_flag;
- u32 is_icmp;
- u32 fast_path_eligible;
- u32 l3_cksum_valid;
- u32 l3_cksum;
- u32 l4_cksum_valid;
- u32 l4_cksum;
- u32 frame;
- u32 proto;
- u32 is_vlan;
- u32 vlan;
- u32 rth_bucket;
- u32 rth_it_hit;
- u32 rth_spdm_hit;
- u32 rth_hash_type;
- u32 rth_value;
-};
-/**
- * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
- * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
- * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
- * configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
- * presentation configuration mismatch.
- * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
- * such as unknown IPv6 header.
- * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity
- *		error (such as FCS or ECC).
- * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error	the RxD buffer(s)
- *		were not appropriately sized and data loss occurred.
- * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
- * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
- * Segment1 exceeded the capacity of Buffer1 and the remainder
- * was placed in Buffer2. Segment2 now starts in Buffer3.
- * No data loss or errors occurred.
- * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
- * assigned buffers has a size of 0 bytes.
- * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
- * VPath Reset or because of a VPIN mismatch.
- * @VXGE_HW_RING_T_CODE_UNUSED: Unused
- * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
- * transfer code condition occurred.
- *
- * Transfer codes returned by adapter.
- */
-enum vxge_hw_ring_tcode {
- VXGE_HW_RING_T_CODE_OK = 0x0,
- VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
- VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
- VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
- VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
- VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
- VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
- VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
- VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
- VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
- VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
- VXGE_HW_RING_T_CODE_UNUSED = 0xE,
- VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
-};
-
-enum vxge_hw_status vxge_hw_ring_rxd_reserve(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh);
-
-void
-vxge_hw_ring_rxd_pre_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void
-vxge_hw_ring_rxd_post_post_wmb(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-void vxge_hw_ring_rxd_post(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
-enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
- struct __vxge_hw_ring *ring_handle,
- void **rxdh,
- u8 *t_code);
-
-enum vxge_hw_status vxge_hw_ring_handle_tcode(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh,
- u8 t_code);
-
-void vxge_hw_ring_rxd_free(
- struct __vxge_hw_ring *ring_handle,
- void *rxdh);
-
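The ring calls declared above were consumed together in the receive path: a poll routine walks completed descriptors, lets the HW layer account for bad transfer codes, and returns descriptors for reuse. A minimal sketch built only from these prototypes (treating VXGE_HW_OK as the success status is an assumption here, and buffer refill via vxge_hw_ring_rxd_reserve()/vxge_hw_ring_rxd_post() is left out for brevity):

/* Hypothetical receive-completion loop; not part of this patch. */
static void example_ring_poll(struct __vxge_hw_ring *ringh)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ringh, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_RING_T_CODE_OK)
			/* let the HW layer account for / react to the error */
			vxge_hw_ring_handle_tcode(ringh, rxdh, t_code);

		/* ...hand the buffer up the stack here... */

		vxge_hw_ring_rxd_free(ringh, rxdh);
	}
}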
-/**
- * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
- * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
- * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
- * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
- * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
- * @VXGE_HW_FRAME_PROTO_TCP: TCP.
- * @VXGE_HW_FRAME_PROTO_UDP: UDP.
- * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
- *
- * Higher layer ethernet protocols and options.
- */
-enum vxge_hw_frame_proto {
- VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
- VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
- VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
- VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
- VXGE_HW_FRAME_PROTO_TCP = 0x02,
- VXGE_HW_FRAME_PROTO_UDP = 0x01,
- VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
- VXGE_HW_FRAME_PROTO_UDP)
-};
-
-/**
- * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
- * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
- *
- * These gather codes are used to indicate the position of a TxD in a TxD list
- */
-enum vxge_hw_fifo_gather_code {
- VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
- VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
- VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
- VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
-};
-
-/**
- * enum vxge_hw_fifo_tcode - tcodes used in fifo
- * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
- * frame data) returned with corrupt data.
- * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
- * with no data.
- * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
- * frame or LSO MSS that was too long (>9800B).
- * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
- * Offload operation, due to improper header template,
- * unsupported protocol, etc.
- * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
- * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
- * data buffer transfer errors are encountered (see below).
- * Otherwise it is set to 0.
- *
- * These tcodes are returned by various APIs for TxD status
- */
-enum vxge_hw_fifo_tcode {
- VXGE_HW_FIFO_T_CODE_OK = 0x0,
- VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
- VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
- VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
- VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
- VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
- VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
-};
-
-enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- void **txdl_priv);
-
-void vxge_hw_fifo_txdl_buffer_set(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh,
- u32 frag_idx,
- dma_addr_t dma_pointer,
- u32 size);
-
-void vxge_hw_fifo_txdl_post(
- struct __vxge_hw_fifo *fifo_handle,
- void *txdlh);
-
-u32 vxge_hw_fifo_free_txdl_count_get(
- struct __vxge_hw_fifo *fifo_handle);
-
-enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
- struct __vxge_hw_fifo *fifoh,
- void **txdlh,
- enum vxge_hw_fifo_tcode *t_code);
-
-enum vxge_hw_status vxge_hw_fifo_handle_tcode(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh,
- enum vxge_hw_fifo_tcode t_code);
-
-void vxge_hw_fifo_txdl_free(
- struct __vxge_hw_fifo *fifoh,
- void *txdlh);
-
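The fifo interface mirrors the same reserve/post/complete pattern on the transmit side: reserve a TxDL, attach one buffer per fragment, post it, and later reclaim completed TxDLs. A rough sketch using only the prototypes above (single fragment, error handling trimmed, VXGE_HW_OK again assumed as the success status):

/* Hypothetical single-fragment transmit and completion; not part of this patch. */
static enum vxge_hw_status example_xmit(struct __vxge_hw_fifo *fifoh,
					dma_addr_t buf, u32 len)
{
	void *txdlh, *txdl_priv;
	enum vxge_hw_status status;

	status = vxge_hw_fifo_txdl_reserve(fifoh, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;	/* out of descriptors */

	vxge_hw_fifo_txdl_buffer_set(fifoh, txdlh, 0 /* frag_idx */, buf, len);
	vxge_hw_fifo_txdl_post(fifoh, txdlh);
	return VXGE_HW_OK;
}

static void example_tx_complete(struct __vxge_hw_fifo *fifoh)
{
	enum vxge_hw_fifo_tcode t_code;
	void *txdlh;

	while (vxge_hw_fifo_txdl_next_completed(fifoh, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifoh, txdlh, t_code);
		vxge_hw_fifo_txdl_free(fifoh, txdlh);
	}
}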
-/*
- * Device
- */
-
-#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
-#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-
-/*
- * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
- * @dma_addr: DMA (mapped) address of _this_ descriptor.
- * @dma_handle: DMA handle used to map the descriptor onto device.
- * @dma_offset: Descriptor's offset in the memory block. HW allocates
- * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
- * bytes. Each memblock is contiguous DMA-able memory. Each
- * memblock contains 1 or more 4KB RxD blocks visible to the
- * Titan hardware.
- * @dma_object: DMA address and handle of the memory block that contains
- * the descriptor. This member is used only in the "checked"
- * version of the HW (to enforce certain assertions);
- * otherwise it gets compiled out.
- * @allocated: True if the descriptor is reserved, false otherwise. Internal usage.
- *
- * Per-receive descriptor HW-private data. HW uses the space to keep DMA
- * information associated with the descriptor. Note that driver can ask HW
- * to allocate additional per-descriptor space for its own (driver-specific)
- * purposes.
- */
-struct __vxge_hw_ring_rxd_priv {
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- ptrdiff_t dma_offset;
-#ifdef VXGE_DEBUG_ASSERT
- struct vxge_hw_mempool_dma *dma_object;
-#endif
-};
-
-struct vxge_hw_mempool_cbs {
- void (*item_func_alloc)(
- struct vxge_hw_mempool *mempoolh,
- u32 memblock_index,
- struct vxge_hw_mempool_dma *dma_object,
- u32 index,
- u32 is_last);
-};
-
-#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
- ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 *data1,
- u64 *data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 action,
- u32 rts_table,
- u32 offset,
- u64 data1,
- u64 data2);
-
-enum vxge_hw_status
-__vxge_hw_vpath_enable(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-void vxge_hw_device_intr_enable(
- struct __vxge_hw_device *devh);
-
-u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
-
-void vxge_hw_device_intr_disable(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_mask_all(
- struct __vxge_hw_device *devh);
-
-void vxge_hw_device_unmask_all(
- struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_hw_device_begin_irq(
- struct __vxge_hw_device *devh,
- u32 skip_alarms,
- u64 *reason);
-
-void vxge_hw_device_clear_tx_rx(
- struct __vxge_hw_device *devh);
-
-/*
- * Virtual Paths
- */
-
-void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
-
-void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
-
-u32 vxge_hw_vpath_id(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_vpath_mac_addr_add_mode {
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
- VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
- VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
-};
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask,
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u8 *macaddr,
- u8 *macaddr_mask);
-
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 vid);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_add(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *etype);
-
-enum vxge_hw_status
-vxge_hw_vpath_etype_delete(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 etype);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_mcast_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_poll_rx(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status vxge_hw_vpath_poll_tx(
- struct __vxge_hw_fifo *fifoh,
- struct sk_buff ***skb_ptr, int nr_skb, int *more);
-
-enum vxge_hw_status vxge_hw_vpath_alarm_process(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u32 skip_alarms);
-
-void
-vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
- int *tim_msix_id, int alarm_msix_id);
-
-void
-vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
-
-void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
-
-void
-vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-enum vxge_hw_status vxge_hw_vpath_intr_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status vxge_hw_vpath_intr_disable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_mask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void vxge_hw_vpath_inta_unmask_tx_rx(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-void
-vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
-
-void
-vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
- void **dtrh);
-
-void
-vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
-
-void
-vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
-
-int
-vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
-
-void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
-
-#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
deleted file mode 100644
index b9efa28bab3e..000000000000
--- a/drivers/net/ethernet/neterion/vxge/vxge-version.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- * This software may be used and distributed according to the terms of
- * the GNU General Public License (GPL), incorporated herein by reference.
- * Drivers based on or derived from this code fall under the GPL and must
- * retain the authorship, copyright and license notice. This file is not
- * a complete program and may only be used when the entire operating
- * system is licensed under the GPL.
- * See the file COPYING in this distribution for more information.
- *
- * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
- * Virtualized Server Adapter.
- * Copyright(c) 2002-2010 Exar Corp.
- ******************************************************************************/
-#ifndef VXGE_VERSION_H
-#define VXGE_VERSION_H
-
-#define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "3"
-#define VXGE_VERSION_BUILD "22640"
-#define VXGE_VERSION_FOR "k"
-
-#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
-
-#define VXGE_DEAD_FW_VER_MAJOR 1
-#define VXGE_DEAD_FW_VER_MINOR 4
-#define VXGE_DEAD_FW_VER_BUILD 4
-
-#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
- VXGE_DEAD_FW_VER_MINOR, \
- VXGE_DEAD_FW_VER_BUILD)
-
-#define VXGE_EPROM_FW_VER_MAJOR 1
-#define VXGE_EPROM_FW_VER_MINOR 6
-#define VXGE_EPROM_FW_VER_BUILD 1
-
-#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
- VXGE_EPROM_FW_VER_MINOR, \
- VXGE_EPROM_FW_VER_BUILD)
-
-#define VXGE_CERT_FW_VER_MAJOR 1
-#define VXGE_CERT_FW_VER_MINOR 8
-#define VXGE_CERT_FW_VER_BUILD 1
-
-#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
- VXGE_CERT_FW_VER_MINOR, \
- VXGE_CERT_FW_VER_BUILD)
-
-#endif
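The version macros removed here pack major/minor/build into one integer so firmware levels can be compared numerically. Working the definitions through gives:

/* VXGE_FW_VER(maj, min, bld) == (maj << 16) + (min << 8) + (bld) */
/*   VXGE_FW_DEAD_VER   == VXGE_FW_VER(1, 4, 4) == 0x010404 */
/*   VXGE_EPROM_FW_VER  == VXGE_FW_VER(1, 6, 1) == 0x010601 */
/*   VXGE_CERT_FW_VER   == VXGE_FW_VER(1, 8, 1) == 0x010801 */
/* so a plain integer comparison such as (fw_ver <= VXGE_FW_DEAD_VER) orders
 * versions correctly as long as minor and build stay below 256.
 */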
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index ffb6f6d05a07..2b383d92d7f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -149,7 +149,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
}
/* Pre_lag action must be first on action list.
- * If other actions already exist they need pushed forward.
+ * If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(nfp_flow->action_data + act_size,
@@ -427,6 +427,12 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
+ if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: loaded firmware does not support tunnel flag offload");
+ return -EOPNOTSUPP;
+ }
+
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -436,7 +442,8 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
- set_tun->tun_id = ip_tun->key.tun_id;
+ if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
+ set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
@@ -479,12 +486,6 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
}
set_tun->tos = ip_tun->key.tos;
-
- if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
- return -EOPNOTSUPP;
- }
set_tun->tun_flags = ip_tun->key.tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -674,9 +675,9 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
fl_hl_mask->hop_limit;
break;
case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
- if (mask & ~IPV6_FLOW_LABEL_MASK ||
- exact & ~IPV6_FLOW_LABEL_MASK) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
+ if (mask & ~IPV6_FLOWINFO_MASK ||
+ exact & ~IPV6_FLOWINFO_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow info action");
return -EOPNOTSUPP;
}
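For context on the mask swap above: the pedit offset is rounded down to the first 32-bit word of the IPv6 header, which carries the version (4 bits), traffic class (8 bits) and flow label (20 bits). The driver-local IPV6_FLOW_LABEL_MASK covered only the low 20 bits, so edits touching the traffic class were rejected; IPV6_FLOWINFO_MASK covers the low 28 bits. The value shown below is the in-tree definition as best recalled, so treat it as an assumption rather than part of this patch:

/* include/net/ipv6.h:
 *   #define IPV6_FLOWINFO_MASK	cpu_to_be32(0x0FFFFFFF)
 *
 * First word of the IPv6 header:
 *   | version (4) | traffic class (8) | flow label (20) |
 *
 * ~IPV6_FLOWINFO_MASK therefore isolates just the version nibble, and a
 * pedit of either traffic class or flow label now passes the check.
 */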
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 68e8a2fb1a29..2df2af1da716 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -96,8 +96,6 @@
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
-#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
-
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 7c31a46195b2..b3b2a23b8d89 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -182,7 +182,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
u8 ip_proto = 0;
/* Temporary buffer for mangling keys, 64 is enough to cover max
* struct size of key in various fields that may be mangled.
- * Supported fileds to mangle:
+ * Supported fields to mangle:
* mac_src/mac_dst(struct flow_match_eth_addrs, 12B)
* nw_tos/nw_ttl(struct flow_match_ip, 2B)
* nw_src/nw_dst(struct flow_match_ipv4/6_addrs, 32B)
@@ -194,7 +194,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
entry1->netdev != entry2->netdev)
return -EINVAL;
- /* check the overlapped fields one by one, the unmasked part
+ /* Check the overlapped fields one by one, the unmasked part
* should not conflict with each other.
*/
if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -563,7 +563,7 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
- /* ip_proto is the only field that needed in later compile_action,
+ /* ip_proto is the only field that is needed in later compile_action,
* needed to set the correct checksum flags. It doesn't really matter
* which input rule's ip_proto field we take as the earlier merge checks
* would have made sure that they don't conflict. We do not know which
@@ -1013,7 +1013,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
nft_m_entry->tc_m_parent = tc_m_entry;
nft_m_entry->nft_parent = nft_entry;
nft_m_entry->tc_flower_cookie = 0;
- /* Copy the netdev from one the pre_ct entry. When the tc_m_entry was created
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
* it only combined them if the netdevs were the same, so can use any of them.
*/
nft_m_entry->netdev = pre_ct_entry->netdev;
@@ -1143,7 +1143,7 @@ nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
zt->priv = priv;
zt->nft = NULL;
- /* init the various hash tables and lists*/
+ /* init the various hash tables and lists */
INIT_LIST_HEAD(&zt->pre_ct_list);
INIT_LIST_HEAD(&zt->post_ct_list);
INIT_LIST_HEAD(&zt->nft_flows_list);
@@ -1346,7 +1346,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
*/
if (is_nft_flow) {
- /* Need to iterate through list of nft_flow entries*/
+ /* Need to iterate through list of nft_flow entries */
struct nfp_fl_ct_flow_entry *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
@@ -1354,7 +1354,7 @@ static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
cleanup_nft_merge_entry(m_entry);
}
} else {
- /* Need to iterate through list of tc_merged_flow entries*/
+ /* Need to iterate through list of tc_merged_flow entries */
struct nfp_fl_ct_tc_merge *ct_entry = entry;
list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index ede90e086b28..e92860e20a24 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -234,7 +234,7 @@ nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
}
/* To signal the end of a batch, both the switch and last flags are set
- * and the the reserved SYNC group ID is used.
+ * and the reserved SYNC group ID is used.
*/
if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
@@ -576,7 +576,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
group->dirty = true;
group->slave_cnt = slave_count;
- /* Group may have been on queue for removal but is now offloable. */
+ /* Group may have been on queue for removal but is now offloadable. */
group->to_remove = false;
mutex_unlock(&lag->lock);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 74e1b279c13b..0f06ef6e24bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -339,7 +339,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
goto err_free_ctx_entry;
}
- /* Do net allocate a mask-id for pre_tun_rules. These flows are used to
+ /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
* configure the pre_tun table and are never actually send to the
* firmware as an add-flow message. This causes the mask-id allocation
* on the firmware to get out of sync if allocated here.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 9d65459bdba5..83c97154c0c7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -359,7 +359,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- /* check if GRE, which has no enc_ports */
+ /* Check if GRE, which has no enc_ports */
if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
@@ -1016,7 +1016,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
- /* check if the two flows are already merged */
+ /* Check if the two flows are already merged */
parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
if (rhashtable_lookup_fast(&priv->merge_table,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 3206ba83b1aa..4e5df9f2c372 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -534,7 +534,7 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
}
}
-/* offload tc action, currently only for tc police */
+/* Offload tc action, currently only for tc police */
static const struct rhashtable_params stats_meter_table_params = {
.key_offset = offsetof(struct nfp_meter_entry, meter_id),
@@ -690,7 +690,7 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
for (i = 0 ; i < action_num; i++) {
- /*set qos associate data for this interface */
+ /* Set qos associate data for this interface */
action = paction + i;
if (action->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
@@ -736,7 +736,7 @@ nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
u32 meter_id;
bool pps;
- /*delete qos associate data for this interface */
+ /* Delete qos associate data for this interface */
if (fl_act->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: qos rate limit offload requires police action");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 97dcf8db7ed2..52f67157bd0f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1072,7 +1072,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
entry->ref_count--;
- /* If del is part of a mod then mac_list is still in use elsewheree. */
+ /* If del is part of a mod then mac_list is still in use elsewhere. */
if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 7db56abaa582..448c1c1afaee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -3,6 +3,7 @@
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -81,12 +82,11 @@ nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
@@ -167,30 +167,35 @@ nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}
-static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
+static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
- if (likely(!md_dst && !tls_handle))
- return 0;
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
- if (!tls_handle)
- return 0;
- md_dst = NULL;
+ if (unlikely(md_dst || tls_handle)) {
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
}
- md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
+
+ if (!(md_dst || tls_handle || vlan_insert))
+ return 0;
+
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- meta_id = 0;
data = skb_push(skb, md_bytes) + md_bytes;
if (md_dst) {
- data -= 4;
+ data -= NFP_NET_META_PORTID_SIZE;
put_unaligned_be32(md_dst->u.port_info.port_id, data);
meta_id = NFP_NET_META_PORTID;
}
@@ -198,13 +203,23 @@ static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
/* conn handle is opaque, we just use u64 to be able to quickly
* compare it to zero
*/
- data -= 8;
+ data -= NFP_NET_META_CONN_HANDLE_SIZE;
memcpy(data, &tls_handle, sizeof(tls_handle));
meta_id <<= NFP_NET_META_FIELD_SIZE;
meta_id |= NFP_NET_META_CONN_HANDLE;
}
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+ /* data type of skb->vlan_proto is __be16
+ * so it fills metadata without calling put_unaligned_be16
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
- data -= 4;
+ data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
return md_bytes;
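The rewritten prepend builds the metadata back to front: each field is written immediately in front of the previously written one while its type is shifted into meta_id, and the meta_id word itself lands at the very start of the prepend. Roughly, for a packet that needs both a port id and VLAN insertion but no TLS handle (type constant values are not part of this hunk and are left symbolic; NFP_NET_META_FIELD_SIZE is the 4-bit type width):

/* Resulting prepend layout, lowest address first:
 *
 *   be32 meta_id | 4B VLAN (tpid, tci) | 4B port id | original packet...
 *
 *   meta_id  == (NFP_NET_META_PORTID << NFP_NET_META_FIELD_SIZE) |
 *               NFP_NET_META_VLAN
 *   md_bytes == sizeof(meta_id) + NFP_NET_META_PORTID_SIZE +
 *               NFP_NET_META_VLAN_SIZE == 4 + 4 + 4
 *
 * i.e. the most recently added field occupies the low nibble of meta_id and
 * sits closest to the meta_id word.
 */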
@@ -258,7 +273,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
+ md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle);
if (unlikely(md_bytes < 0))
goto err_flush;
@@ -282,7 +297,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
txd->flags = 0;
@@ -320,7 +335,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
txd = &tx_ring->txds[wr_idx];
txd->dma_len = cpu_to_le16(fsize);
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
txd->offset_eop = md_bytes |
((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
txd->vals8[1] = second_half;
@@ -562,8 +577,12 @@ nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
+ /* DMA address is expanded to 48-bit width in freelist for NFP3800,
+ * so the *_48b macro is used accordingly, it's also OK to fill
+	 * so the *_48b macro is used accordingly; it's also OK to fill
+	 * a 40-bit address since the top 8 bits are set to 0.
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
@@ -700,7 +719,7 @@ bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
- u32 meta_info;
+ u32 meta_info, vlan_info;
meta_info = get_unaligned_be32(data);
data += 4;
@@ -718,6 +737,17 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
@@ -817,7 +847,7 @@ nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
+ nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -1046,9 +1076,11 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
#endif
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
@@ -1193,7 +1225,7 @@ nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_40b(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
txd->flags = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
index 47604d5e25eb..a03190c9313c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
@@ -247,10 +247,13 @@ nfp_nfd3_print_tx_descs(struct seq_file *file,
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VEPA | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
@@ -260,6 +263,7 @@ const struct nfp_dp_ops nfp_nfd3_ops = {
.version = NFP_NFD_VER_NFD3,
.tx_min_desc_per_pkt = 1,
.cap_mask = NFP_NFD3_CFG_CTRL_SUPPORTED,
+ .dma_mask = DMA_BIT_MASK(40),
.poll = nfp_nfd3_poll,
.xsk_poll = nfp_nfd3_xsk_poll,
.ctrl_poll = nfp_nfd3_ctrl_poll,
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index c16c4b42ecfd..65e243168765 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -40,7 +40,7 @@ nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off);
+ nfp_desc_set_dma_addr_40b(txd, xrxbuf->dma_addr + pkt_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -94,9 +94,12 @@ static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, meta))) {
+ dev_kfree_skb_any(skb);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+
if (meta_xdp)
skb_metadata_set(skb,
xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
@@ -361,10 +364,8 @@ static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
/* Build TX descriptor. */
txd = &tx_ring->txds[wr_idx];
- nfp_desc_set_dma_addr(txd,
- xsk_buff_raw_get_dma(xsk_pool,
- desc[i].addr
- ));
+ nfp_desc_set_dma_addr_40b(txd,
+ xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));
txd->offset_eop = NFD3_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(desc[i].len);
txd->data_len = cpu_to_le16(desc[i].len);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 805071d64a20..2b427d8ccb2f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -46,28 +46,16 @@ nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
segs = skb_shinfo(skb)->gso_segs;
mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
- /* Note: TSO of the packet with metadata prepended to skb is not
- * supported yet, in which case l3/l4_offset and lso_hdrlen need
- * be correctly handled here.
- * Concern:
- * The driver doesn't have md_bytes easily available at this point.
- * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
- * so it needs the full length there. The app MEs might prefer
- * l3_offset and l4_offset relative to the start of packet data,
- * but could probably cope with it being relative to the CTM buf
- * data offset.
- */
txd.l3_offset = l3_offset;
txd.l4_offset = l4_offset;
txd.lso_meta_res = 0;
@@ -182,55 +170,52 @@ close_block:
return 0;
}
-static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
+static int
+nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
+ struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ bool vlan_insert;
+ u32 meta_id = 0;
+ int md_bytes;
- if (likely(!md_dst))
- return 0;
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
- return 0;
-
- /* Note: Unsupported case when TSO a skb with metedata prepended.
- * See the comments in `nfp_nfdk_tx_tso` for details.
- */
- if (unlikely(md_dst && skb_is_gso(skb)))
- return -EOPNOTSUPP;
-
- if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
- return -ENOMEM;
-
- data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
- put_unaligned_be32(md_dst->u.port_info.port_id, data);
-
- return sizeof(md_dst->u.port_info.port_id);
-}
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
-static int
-nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
- struct nfp_net_r_vector *r_vec)
-{
- unsigned char *data;
- int res, md_bytes;
- u32 meta_id = 0;
+ vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- res = nfp_nfdk_prep_port_id(skb);
- if (unlikely(res <= 0))
- return res;
+ if (!(md_dst || vlan_insert))
+ return 0;
- md_bytes = res;
- meta_id = NFP_NET_META_PORTID;
+ md_bytes = sizeof(meta_id) +
+ !!md_dst * NFP_NET_META_PORTID_SIZE +
+ vlan_insert * NFP_NET_META_VLAN_SIZE;
- if (unlikely(skb_cow_head(skb, sizeof(meta_id))))
+ if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- md_bytes += sizeof(meta_id);
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= NFP_NET_META_PORTID_SIZE;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (vlan_insert) {
+ data -= NFP_NET_META_VLAN_SIZE;
+ /* data type of skb->vlan_proto is __be16
+ * so it fills metadata without calling put_unaligned_be16
+ */
+ memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
+ put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_VLAN;
+ }
meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
FIELD_PREP(NFDK_META_FIELDS, meta_id);
- data = skb_push(skb, sizeof(meta_id));
+ data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
return NFDK_DESC_TX_CHAIN_META;
@@ -278,7 +263,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
if (unlikely((int)metadata < 0))
goto err_flush;
@@ -327,7 +312,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
/* starts at bit 0 */
BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
@@ -352,7 +337,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
dma_len -= dlen_type;
dma_addr += dlen_type + 1;
@@ -608,8 +593,8 @@ nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
@@ -730,7 +715,7 @@ static bool
nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
- u32 meta_info;
+ u32 meta_info, vlan_info;
meta_info = get_unaligned_be32(data);
data += 4;
@@ -748,6 +733,17 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_VLAN:
+ vlan_info = get_unaligned_be32(data);
+ if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
+ meta->vlan.stripped = true;
+ meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
+ vlan_info);
+ meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
+ vlan_info);
+ }
+ data += 4;
+ break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
@@ -944,7 +940,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
dma_len -= tmp_dlen;
@@ -955,7 +951,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
dma_len -= 1;
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
dlen_type &= NFDK_DESC_TX_DMA_LEN;
dma_len -= dlen_type;
@@ -1185,9 +1181,11 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
+ if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
@@ -1349,7 +1347,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
dma_len -= tmp_dlen;
@@ -1360,7 +1358,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
dma_len -= 1;
dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
txd->dma_len_type = cpu_to_le16(dlen_type);
- nfp_nfdk_tx_desc_set_dma_addr(txd, dma_addr);
+ nfp_desc_set_dma_addr_48b(txd, dma_addr);
dlen_type &= NFDK_DESC_TX_DMA_LEN;
dma_len -= dlen_type;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
index 301f11108826..fdb8144a63e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
@@ -168,10 +168,12 @@ nfp_nfdk_print_tx_descs(struct seq_file *file,
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2 | NFP_NET_CFG_CTRL_RXQINQ | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2 | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
- NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_TXRWB | NFP_NET_CFG_CTRL_VEPA | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
@@ -181,6 +183,7 @@ const struct nfp_dp_ops nfp_nfdk_ops = {
.version = NFP_NFD_VER_NFDK,
.tx_min_desc_per_pkt = NFDK_TX_DESC_PER_SIMPLE_PKT,
.cap_mask = NFP_NFDK_CFG_CTRL_SUPPORTED,
+ .dma_mask = DMA_BIT_MASK(48),
.poll = nfp_nfdk_poll,
.ctrl_poll = nfp_nfdk_ctrl_poll,
.xmit = nfp_nfdk_tx,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 09f250e74dfa..bb3f46c74f77 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -230,7 +230,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02x, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4f88d17536c3..873429f7a6da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -392,7 +392,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
/* First try to find a firmware image specific for this device */
interface = nfp_cpp_interface(pf->cpp);
nfp_cpp_serial(pf->cpp, &serial);
- sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw",
+ sprintf(fw_name, "netronome/serial-%pMF-%02x-%02x.nffw",
serial, interface >> 8, interface & 0xff);
fw = nfp_net_fw_request(pdev, pf, fw_name);
if (fw)
@@ -410,7 +410,9 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
return NULL;
}
- fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "nffw.partno");
+ if (!fw_model)
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
if (!fw_model) {
dev_err(&pdev->dev, "Error: can't read part number\n");
return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 3dd3a92d2e7f..a101ff30a1ae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -115,7 +115,7 @@ struct nfp_nfdk_tx_buf;
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
/* Convenience macro for writing dma address into RX/TX descriptors */
-#define nfp_desc_set_dma_addr(desc, dma_addr) \
+#define nfp_desc_set_dma_addr_40b(desc, dma_addr) \
do { \
__typeof__(desc) __d = (desc); \
dma_addr_t __addr = (dma_addr); \
@@ -124,13 +124,13 @@ struct nfp_nfdk_tx_buf;
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
-#define nfp_nfdk_tx_desc_set_dma_addr(desc, dma_addr) \
- do { \
- __typeof__(desc) __d = (desc); \
- dma_addr_t __addr = (dma_addr); \
- \
- __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr) & 0xff); \
- __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
+#define nfp_desc_set_dma_addr_48b(desc, dma_addr) \
+ do { \
+ __typeof__(desc) __d = (desc); \
+ dma_addr_t __addr = (dma_addr); \
+ \
+ __d->dma_addr_hi = cpu_to_le16(upper_32_bits(__addr)); \
+ __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
} while (0)
/**
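The renamed helpers differ only in how many high address bits they keep, which is also why the two datapaths advertise different .dma_mask values in their nfp_dp_ops. A worked example with illustrative addresses (d stands for a descriptor of the matching type):

/* 48-bit example (NFDK / NFP3800 descriptors): */
/*   nfp_desc_set_dma_addr_48b(d, 0x0000ABCD76543210ULL);          */
/*     d->dma_addr_hi = cpu_to_le16(0xABCD);      bits 47:32       */
/*     d->dma_addr_lo = cpu_to_le32(0x76543210);  bits 31:0        */

/* 40-bit example (NFD3 descriptors keep a single high byte): */
/*   nfp_desc_set_dma_addr_40b(d, 0x0000009876543210ULL);          */
/*     d->dma_addr_hi = 0x98;                     bits 39:32, u8   */
/*     d->dma_addr_lo = cpu_to_le32(0x76543210);  bits 31:0        */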
@@ -225,8 +225,8 @@ struct nfp_net_tx_ring {
struct nfp_net_rx_desc {
union {
struct {
- u8 dma_addr_hi; /* High bits of the buf address */
- __le16 reserved; /* Must be zero */
+ __le16 dma_addr_hi; /* High bits of the buf address */
+ u8 reserved; /* Must be zero */
u8 meta_len_dd; /* Must be zero */
__le32 dma_addr_lo; /* Low bits of the buffer address */
@@ -248,6 +248,8 @@ struct nfp_net_rx_desc {
};
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+#define NFP_NET_VLAN_CTAG 0
+#define NFP_NET_VLAN_STAG 1
struct nfp_meta_parsed {
u8 hash_type;
@@ -256,6 +258,11 @@ struct nfp_meta_parsed {
u32 mark;
u32 portid;
__wsum csum;
+ struct {
+ bool stripped;
+ u8 tpid;
+ u16 tci;
+ } vlan;
};
struct nfp_net_rx_hash {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 4e56a99087fa..cf4d6f1129fa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -31,6 +31,7 @@
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>
@@ -597,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return skb;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
resync_pending = tls_offload_tx_resync_pending(skb->sk);
@@ -665,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
return;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
@@ -1694,16 +1695,18 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
if (features & NETIF_F_HW_VLAN_CTAG_TX)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
@@ -1713,6 +1716,13 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (changed & NETIF_F_HW_VLAN_STAG_RX) {
+ if (features & NETIF_F_HW_VLAN_STAG_RX)
+ new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -1742,6 +1752,27 @@ static int nfp_net_set_features(struct net_device *netdev,
}
static netdev_features_t
+nfp_net_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (features & NETIF_F_HW_VLAN_STAG_RX)) {
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
+ } else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ netdev_warn(netdev,
+ "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
+ }
+ }
+ return features;
+}
+
+static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
@@ -1757,8 +1788,7 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
@@ -1892,6 +1922,69 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
+ nlflags, filter_mask, NULL);
+}
+
+static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem, err;
+ u32 new_ctrl;
+ u16 mode;
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
+ new_ctrl = nn->dp.ctrl;
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA)
+ new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
+ else if (mode == BRIDGE_MODE_VEB)
+ new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
+ else
+ return -EOPNOTSUPP;
+
+ if (new_ctrl == nn->dp.ctrl)
+ return 0;
+
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (!err)
+ nn->dp.ctrl = new_ctrl;
+
+ return err;
+ }
+
+ return -EINVAL;
+}
+
const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
@@ -1914,11 +2007,14 @@ const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_xsk_wakeup = nfp_net_xsk_wakeup,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
const struct net_device_ops nfp_nfdk_netdev_ops = {
@@ -1932,6 +2028,7 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_rate = nfp_app_set_vf_rate,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
@@ -1942,10 +2039,13 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_change_mtu = nfp_net_change_mtu,
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
+ .ndo_fix_features = nfp_net_fix_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+ .ndo_bridge_getlink = nfp_net_bridge_getlink,
+ .ndo_bridge_setlink = nfp_net_bridge_setlink,
};
static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
@@ -1993,7 +2093,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2002,6 +2102,9 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXQINQ ? "RXQINQ " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
@@ -2012,6 +2115,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
+ nn->cap & NFP_NET_CFG_CTRL_VEPA ? "VEPA " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -2040,6 +2144,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings)
{
+ u64 dma_mask = dma_get_mask(&pdev->dev);
struct nfp_net *nn;
int err;
@@ -2085,6 +2190,14 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
goto err_free_nn;
}
+ if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
+ dev_err(&pdev->dev,
+ "DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
+ nn->dp.ops->dma_mask, dma_mask);
+ err = -EINVAL;
+ goto err_free_nn;
+ }
+
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
@@ -2279,31 +2392,39 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->vlan_features = netdev->hw_features;
- if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_RXVLAN;
}
- if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
} else {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
+ NFP_NET_CFG_CTRL_TXVLAN;
}
}
if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
}
+ if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
+ }
netdev->features = netdev->hw_features;
if (nfp_app_has_tc(nn->app) && nn->port)
netdev->hw_features |= NETIF_F_HW_TC;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8892a94f00c3..ac05ec34d69e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -31,10 +31,16 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
#define NFP_NET_LSO_MAX_SEGS 64
+/* working with metadata vlan api (NFD version >= 2.0) */
+#define NFP_NET_META_VLAN_STRIP BIT(31)
+#define NFP_NET_META_VLAN_TPID_MASK GENMASK(19, 16)
+#define NFP_NET_META_VLAN_TCI_MASK GENMASK(15, 0)
+
/* Prepend field types */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_VLAN 4 /* ctag or stag type */
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
@@ -42,6 +48,10 @@
#define NFP_META_PORT_ID_CTRL ~0U
+/* Prepend field sizes */
+#define NFP_NET_META_VLAN_SIZE 4
+#define NFP_NET_META_PORTID_SIZE 4
+#define NFP_NET_META_CONN_HANDLE_SIZE 8
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
@@ -89,11 +99,15 @@
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
+#define NFP_NET_CFG_CTRL_RXQINQ (0x1 << 13) /* Enable S-tag strip */
+#define NFP_NET_CFG_CTRL_RXVLAN_V2 (0x1 << 15) /* Enable C-tag strip */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_VEPA (0x1 << 22) /* Enable VEPA mode */
+#define NFP_NET_CFG_CTRL_TXVLAN_V2 (0x1 << 23) /* Enable VLAN C-tag insert */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -110,6 +124,10 @@
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_RXVLAN_ANY (NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_RXVLAN_V2)
+#define NFP_NET_CFG_CTRL_TXVLAN_ANY (NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_TXVLAN_V2)
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
index 34dd94811df3..550df83b798c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
@@ -440,3 +440,27 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
return ret;
}
+
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta)
+{
+ u16 tpid = 0, tci = 0;
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
+ tpid = ETH_P_8021Q;
+ tci = le16_to_cpu(rxd->rxd.vlan);
+ } else if (meta->vlan.stripped) {
+ if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
+ tpid = ETH_P_8021Q;
+ else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
+ tpid = ETH_P_8021AD;
+ else
+ return false;
+
+ tci = meta->vlan.tci;
+ }
+ if (tpid)
+ __vlan_hwaccel_put_tag(skb, htons(tpid), tci);
+
+ return true;
+}
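
For reference, the metadata word behind meta->vlan follows the NFP_NET_META_VLAN_* layout added to nfp_net_ctrl.h above: strip flag in bit 31, TPID selector in bits 19:16, TCI in bits 15:0. The actual parsing lives in the per-datapath RX paths (not shown in this hunk); the sketch below, with an assumed holder struct, only illustrates how those masks decompose the word:

	#include <linux/bitfield.h>
	#include <linux/types.h>

	/* Illustrative holder; the driver's real struct is nfp_meta_parsed. */
	struct vlan_meta_sketch {
		u8  stripped;
		u8  tpid;	/* NFP_NET_VLAN_CTAG or NFP_NET_VLAN_STAG */
		u16 tci;
	};

	static void vlan_meta_decode(u32 meta_word, struct vlan_meta_sketch *v)
	{
		v->stripped = !!(meta_word & NFP_NET_META_VLAN_STRIP);
		v->tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK, meta_word);
		v->tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK, meta_word);
	}
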
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
index c934cc2d3208..831c83ce0d3d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
@@ -106,6 +106,8 @@ int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta);
enum nfp_nfd_version {
NFP_NFD_VER_NFD3,
@@ -117,6 +119,7 @@ enum nfp_nfd_version {
* @version: Indicate dp type
* @tx_min_desc_per_pkt: Minimal TX descs needed for each packet
* @cap_mask: Mask of supported features
+ * @dma_mask: DMA addressing capability
* @poll: Napi poll for normal rx/tx
* @xsk_poll: Napi poll when xsk is enabled
* @ctrl_poll: Tasklet poll for ctrl rx/tx
@@ -134,6 +137,7 @@ struct nfp_dp_ops {
enum nfp_nfd_version version;
unsigned int tx_min_desc_per_pkt;
u32 cap_mask;
+ u64 dma_mask;
int (*poll)(struct napi_struct *napi, int budget);
int (*xsk_poll)(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index df0afd271a21..c922dfab8080 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -29,6 +29,7 @@
#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
+#include "nfpcore/nfp_cpp.h"
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
@@ -442,6 +443,160 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
+static int nfp_test_link(struct net_device *netdev)
+{
+ if (!netif_carrier_ok(netdev) || !(netdev->flags & IFF_UP))
+ return 1;
+
+ return 0;
+}
+
+static int nfp_test_nsp(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_nsp_identify *nspi;
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_info(netdev, "NSP Test: failed to access the NSP: %d\n", err);
+ goto exit;
+ }
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15) {
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
+ if (!nspi) {
+ err = -ENOMEM;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_identify(nsp, nspi, sizeof(*nspi));
+ if (err < 0)
+ netdev_info(netdev, "NSP Test: reading bsp version failed %d\n", err);
+
+ kfree(nspi);
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+exit:
+ return err;
+}
+
+static int nfp_test_fw(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ netdev_info(netdev, "FW Test: update failed %d\n", err);
+
+ return err;
+}
+
+static int nfp_test_reg(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ struct nfp_cpp *cpp = app->cpp;
+ u32 model = nfp_cpp_model(cpp);
+ u32 value;
+ int err;
+
+ err = nfp_cpp_model_autodetect(cpp, &value);
+ if (err < 0) {
+ netdev_info(netdev, "REG Test: NFP model detection failed %d\n", err);
+ return err;
+ }
+
+ return (value == model) ? 0 : 1;
+}
+
+static bool link_test_supported(struct net_device *netdev)
+{
+ return true;
+}
+
+static bool nsp_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static bool fw_test_supported(struct net_device *netdev)
+{
+ if (nfp_netdev_is_nfp_net(netdev))
+ return true;
+
+ return false;
+}
+
+static bool reg_test_supported(struct net_device *netdev)
+{
+ if (nfp_app_from_netdev(netdev))
+ return true;
+
+ return false;
+}
+
+static struct nfp_self_test_item {
+ char name[ETH_GSTRING_LEN];
+ bool (*is_supported)(struct net_device *dev);
+ int (*func)(struct net_device *dev);
+} nfp_self_test[] = {
+ {"Link Test", link_test_supported, nfp_test_link},
+ {"NSP Test", nsp_test_supported, nfp_test_nsp},
+ {"Firmware Test", fw_test_supported, nfp_test_fw},
+ {"Register Test", reg_test_supported, nfp_test_reg}
+};
+
+#define NFP_TEST_TOTAL_NUM ARRAY_SIZE(nfp_self_test)
+
+static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ ethtool_sprintf(&data, nfp_self_test[i].name);
+}
+
+static int nfp_get_self_test_count(struct net_device *netdev)
+{
+ int i, count = 0;
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
+ if (nfp_self_test[i].is_supported(netdev))
+ count++;
+
+ return count;
+}
+
+static void nfp_net_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ int i, ret, count = 0;
+
+ netdev_info(netdev, "Start self test\n");
+
+ for (i = 0; i < NFP_TEST_TOTAL_NUM; i++) {
+ if (nfp_self_test[i].is_supported(netdev)) {
+ ret = nfp_self_test[i].func(netdev);
+ if (ret)
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[count++] = ret;
+ }
+ }
+
+ netdev_info(netdev, "Test end\n");
+}
+
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -705,6 +860,9 @@ static void nfp_net_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -739,6 +897,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
cnt += nfp_mac_get_stats_count(netdev);
cnt += nfp_app_port_get_stats_count(nn->port);
return cnt;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -757,6 +917,9 @@ static void nfp_port_get_strings(struct net_device *netdev,
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
break;
+ case ETH_SS_TEST:
+ nfp_get_self_test_strings(netdev, data);
+ break;
}
}
@@ -786,6 +949,8 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
return count;
+ case ETH_SS_TEST:
+ return nfp_get_self_test_count(netdev);
default:
return -EOPNOTSUPP;
}
@@ -1460,6 +1625,55 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static void nfp_port_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return;
+
+ /* Currently pause frame support is fixed */
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+}
+
+static int nfp_net_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Control LED to blink */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 1);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Control LED to normal mode */
+ err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -1468,6 +1682,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
+ .self_test = nfp_net_self_test,
.get_strings = nfp_net_get_strings,
.get_ethtool_stats = nfp_net_get_stats,
.get_sset_count = nfp_net_get_sset_count,
@@ -1492,6 +1707,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -1499,6 +1716,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
+ .self_test = nfp_net_self_test,
.get_sset_count = nfp_port_get_sset_count,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
@@ -1509,6 +1727,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .get_pauseparam = nfp_port_get_pauseparam,
+ .set_phys_id = nfp_net_set_phys_id,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 75b5018f2e1b..8b77582bdfa0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -365,9 +365,9 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->vlan_features = netdev->hw_features;
- if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
+ if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN_ANY)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
else
@@ -375,11 +375,15 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
}
if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (repr_cap & NFP_NET_CFG_CTRL_RXQINQ)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
netdev->features = netdev->hw_features;
- /* Advertise but disable TSO by default. */
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ /* C-Tag strip and S-Tag strip can't be supported simultaneously,
+ * so enable C-Tag strip and disable S-Tag strip by default.
+ */
+ netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
index 86829446c637..aea507aed49d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
@@ -70,8 +70,12 @@ void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- rx_ring->xsk_rxbufs[wr_idx].dma_addr);
+ /* DMA address is expanded to 48-bit width in freelist for NFP3800,
+ * so the *_48b macro is used accordingly. It's also OK to fill
+ * a 40-bit address since the top 8 bits get set to 0.
+ */
+ nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
+ rx_ring->xsk_rxbufs[wr_idx].dma_addr);
rx_ring->wr_p++;
wr_ptr_add++;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
index afab6f0fc564..6ad43c7cefe6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -4,7 +4,6 @@
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
-#include <linux/kernel.h>
#include <linux/crc32.h>
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index ddb34bfb9bef..3d379e937184 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -13,36 +13,22 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/sizes.h>
-#include <linux/stringify.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
#endif
-#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x
-
-#define __nfp_err(cpp, fmt, args...) \
- dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_warn(cpp, fmt, args...) \
- dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_info(cpp, fmt, args...) \
- dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_dbg(cpp, fmt, args...) \
- dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define __nfp_printk(level, cpp, fmt, args...) \
- dev_printk(level, nfp_cpp_device(cpp)->parent, \
- NFP_SUBSYS ": " fmt, ## args)
-
#define nfp_err(cpp, fmt, args...) \
- __nfp_err(cpp, string_format(fmt), ## args)
+ dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_warn(cpp, fmt, args...) \
- __nfp_warn(cpp, string_format(fmt), ## args)
+ dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_info(cpp, fmt, args...) \
- __nfp_info(cpp, string_format(fmt), ## args)
+ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_dbg(cpp, fmt, args...) \
- __nfp_dbg(cpp, string_format(fmt), ## args)
+ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_printk(level, cpp, fmt, args...) \
- __nfp_printk(level, cpp, string_format(fmt), ## args)
+ dev_printk(level, nfp_cpp_device(cpp)->parent, \
+ NFP_SUBSYS ": " fmt, ## args)
#define PCI_64BIT_BAR_COUNT 3
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
index 28384d6d1c6f..0725b51c2a95 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
@@ -9,7 +9,7 @@
const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
[NFP_DEV_NFP3800] = {
- .dma_mask = DMA_BIT_MASK(40),
+ .dma_mask = DMA_BIT_MASK(48),
.qc_idx_mask = GENMASK(8, 0),
.qc_addr_offset = 0x400000,
.min_qc_size = 512,
@@ -21,7 +21,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
.qc_area_sz = 0x100000,
},
[NFP_DEV_NFP3800_VF] = {
- .dma_mask = DMA_BIT_MASK(40),
+ .dma_mask = DMA_BIT_MASK(48),
.qc_idx_mask = GENMASK(8, 0),
.qc_addr_offset = 0,
.min_qc_size = 512,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index f5360bae6f75..77d66855be42 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -196,6 +196,8 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
return !!eth_port->fec_modes_supported;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 311a5be25acb..edd300033735 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -49,6 +49,7 @@
#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -492,6 +493,35 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
return 0;
}
+int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ /* The ability to set ID mode was added in ABI 0.32 */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_SET_IDMODE;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, state);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
({ \
__BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f3568901eb91..1443f788ee37 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1437,7 +1437,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
- if ((vlan_flags & features) &&
+ if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index f54035455ad6..c03986bf2628 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -947,10 +947,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
}
if (encap)
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
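
The skb_tcp_all_headers()/skb_inner_tcp_all_headers() calls used here (and in the netxen, qede, qlcnic and emac hunks that follow) are the helpers this series switches to; they behave like the open-coded sums they replace, roughly:

	#include <linux/tcp.h>

	/* Sketch of the helpers' behaviour, mirroring the removed expressions;
	 * see include/linux/tcp.h for the canonical definitions.
	 */
	static inline int sketch_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}

	static inline int sketch_inner_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
	}
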
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 07dd3c3b1771..4e6f00af17d9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1877,7 +1877,7 @@ netxen_tso_check(struct net_device *netdev,
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 672480c9d195..d61cd32ec3b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -412,7 +412,7 @@ static int qed_llh_alloc(struct qed_dev *cdev)
continue;
p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
- DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n",
p_llh_info->num_ppfid, i);
p_llh_info->num_ppfid++;
}
@@ -626,7 +626,7 @@ static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
if (ppfid >= p_llh_info->num_ppfid) {
DP_NOTICE(cdev,
- "ppfid %d is not valid, available indices are 0..%hhd\n",
+ "ppfid %d is not valid, available indices are 0..%d\n",
ppfid, p_llh_info->num_ppfid - 1);
*p_abs_ppfid = 0;
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 82e74f62b677..d701ecd3ba00 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1110,7 +1110,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
bit_len);
/* Some bits represent more than a
- * a single interrupt. Correctly print
+ * single interrupt. Correctly print
* their name.
*/
if (ATTENTION_LENGTH(flags) > 2 ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 69b0ede75cae..5a5dbbb8d8aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -42,8 +42,7 @@ int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
bmap->max_count = max_count;
- bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
- GFP_KERNEL);
+ bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL);
if (!bmap->bitmap)
return -ENOMEM;
@@ -107,7 +106,7 @@ int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
- return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+ return bitmap_empty(bmap->bitmap, bmap->max_count);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -343,7 +342,7 @@ void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
}
end:
- kfree(bmap->bitmap);
+ bitmap_free(bmap->bitmap);
bmap->bitmap = NULL;
}
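
The qed_rdma change above is the standard conversion from an open-coded kcalloc(BITS_TO_LONGS(n), sizeof(long), ...) bitmap to the dedicated bitmap API; allocation, emptiness check and free pair up as in this sketch:

	#include <linux/bitmap.h>
	#include <linux/bug.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	static int bitmap_roundtrip_sketch(unsigned int nbits)
	{
		unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

		if (!map)
			return -ENOMEM;

		/* bitmap_empty() replaces the find_first_bit() == nbits idiom */
		WARN_ON(!bitmap_empty(map, nbits));

		bitmap_free(map);	/* pairs with bitmap_zalloc() */
		return 0;
	}
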
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index b7cc36589f59..7c2af482192d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -260,11 +260,9 @@ static int map_frag_to_bd(struct qede_tx_queue *txq,
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 8d43ca282956..9da5e97f8a0a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -497,7 +497,7 @@ set_flags:
}
opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->hdr_length = hdr_len;
opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e90fa97c0ae6..8dd7aa08ecfb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1869,8 +1869,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
if (!min_tx_rate)
min_tx_rate = QLC_VF_MIN_TX_RATE;
- if (max_tx_rate &&
- (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
+ if (max_tx_rate && max_tx_rate >= 10000) {
netdev_err(netdev,
"Invalid max Tx rate, allowed range is [%d - %d]",
min_tx_rate, QLC_VF_MAX_TX_RATE);
@@ -1880,8 +1879,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
if (!max_tx_rate)
max_tx_rate = 10000;
- if (min_tx_rate &&
- (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ if (min_tx_rate && min_tx_rate < QLC_VF_MIN_TX_RATE) {
netdev_err(netdev,
"Invalid min Tx rate, allowed range is [%d - %d]",
QLC_VF_MIN_TX_RATE, max_tx_rate);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 06104d2ff5b3..0d80447d4d3b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1264,7 +1264,7 @@ static int emac_tso_csum(struct emac_adapter *adpt,
pskb_trim(skb, pkt_len);
}
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* we only need to do csum */
netif_warn(adpt, tx_err, adpt->netdev,
@@ -1339,7 +1339,7 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
/* if Large Segment Offload is (in TCP Segmentation Offload struct) */
if (TPD_LSO(tpd)) {
- mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mapped_len = skb_tcp_all_headers(skb);
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = mapped_len;
@@ -1465,7 +1465,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
/* Make sure the are enough free descriptors to hold one
* maximum-sized SKB. We need one desc for each fragment,
* one for the checksum (emac_tso_csum), one for TSO, and
- * and one for the SKB header.
+ * one for the SKB header.
*/
if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
netif_stop_queue(adpt->netdev);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 407a1f8e3059..a1c10b61269b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -89,7 +89,7 @@ static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
- /* Exit and disable EEE in case of we are are in LPI state. */
+ /* Exit and disable EEE in case we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index b9298031ea51..bb06fa228367 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -8,7 +8,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
+ mae.o tc.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 8b62ce21aff3..ee734b69150f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2538,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
if (rc)
return rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
if (rc)
- return rc;
+ goto out_unlock;
list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
if (rc)
goto fail_add_vlan;
}
- return 0;
+ goto out_unlock;
fail_add_vlan:
efx_mcdi_filter_table_remove(efx);
+out_unlock:
+ up_write(&efx->filter_sem);
return rc;
}
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3211,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
@@ -3243,9 +3251,7 @@ restore_vadaptor:
if (rc2)
goto reset_nic;
restore_filters:
- down_write(&efx->filter_sem);
rc2 = efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3275,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_stop(efx->net_dev);
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
+ efx_ef10_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -3286,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (was_enabled)
@@ -3877,7 +3881,7 @@ static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int efx_tunnel_type, rc;
@@ -3937,7 +3941,7 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
unsigned int table, unsigned int entry,
struct udp_tunnel_info *ti)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
struct efx_ef10_nic_data *nic_data;
int rc;
@@ -4092,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index 173f0ecebc70..71aab3d0480f 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -423,65 +423,61 @@ static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
*/
static void ef100_pci_remove(struct pci_dev *pci_dev)
{
- struct efx_nic *efx;
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ struct efx_probe_data *probe_data;
- efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
- rtnl_lock();
- dev_close(efx->net_dev);
- rtnl_unlock();
-
- /* Unregistering our netdev notifier triggers unbinding of TC indirect
- * blocks, so we have to do it before PCI removal.
- */
- unregister_netdevice_notifier(&efx->netdev_notifier);
-#if defined(CONFIG_SFC_SRIOV)
- if (!efx->type->is_vf)
- efx_ef100_pci_sriov_disable(efx);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ ef100_remove_netdev(probe_data);
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_struct_tc(efx);
#endif
+
ef100_remove(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
- pci_set_drvdata(pci_dev, NULL);
- efx_fini_struct(efx);
- free_netdev(efx->net_dev);
+ pci_dbg(pci_dev, "shutdown successful\n");
pci_disable_pcie_error_reporting(pci_dev);
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ kfree(probe_data);
};
static int ef100_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
struct ef100_func_ctl_window fcw = { 0 };
- struct net_device *net_dev;
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
- if (!net_dev)
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
efx->type = (const struct efx_nic_type *)entry->driver_data;
+ efx->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, efx);
- SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail;
efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
- netif_info(efx, probe, efx->net_dev,
- "Solarflare EF100 NIC detected\n");
+ pci_info(pci_dev, "Solarflare EF100 NIC detected\n");
rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Error looking for ef100 function control window, rc=%d\n",
- rc);
+ pci_err(pci_dev,
+ "Error looking for ef100 function control window, rc=%d\n",
+ rc);
goto fail;
}
@@ -493,8 +489,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
}
if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
- netif_err(efx, probe, efx->net_dev,
- "Func control window overruns BAR\n");
+ pci_err(pci_dev, "Func control window overruns BAR\n");
rc = -EIO;
goto fail;
}
@@ -508,19 +503,16 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
efx->reg_base = fcw.offset;
- efx->netdev_notifier.notifier_call = ef100_netdev_event;
- rc = register_netdevice_notifier(&efx->netdev_notifier);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Failed to register netdevice notifier, rc=%d\n", rc);
+ rc = efx->type->probe(efx);
+ if (rc)
goto fail;
- }
- rc = efx->type->probe(efx);
+ efx->state = STATE_PROBED;
+ rc = ef100_probe_netdev(probe_data);
if (rc)
goto fail;
- netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+ pci_dbg(pci_dev, "initialisation successful\n");
return 0;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 5dba4125d953..702abbe59b76 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -26,7 +26,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 67fe44db6b61..17b9d37218cb 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -22,6 +22,7 @@
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
+#include "ef100_sriov.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -79,11 +80,12 @@ static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
*/
static int ef100_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
+ efx_detach_reps(efx);
netif_stop_queue(net_dev);
efx_stop_all(efx);
efx_mcdi_mac_fini_stats(efx);
@@ -96,13 +98,15 @@ static int ef100_net_stop(struct net_device *net_dev)
efx_mcdi_free_vis(efx);
efx_remove_interrupts(efx);
+ efx->state = STATE_NET_DOWN;
+
return 0;
}
/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int allocated_vis;
int rc;
@@ -172,6 +176,10 @@ static int ef100_net_open(struct net_device *net_dev)
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
+ efx->state = STATE_NET_UP;
+ if (netif_running(efx->net_dev))
+ efx_attach_reps(efx);
+
return 0;
fail:
@@ -189,7 +197,16 @@ fail:
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
+}
+
+netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
+ struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct efx_rep *efv)
+{
struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
int rc;
@@ -204,7 +221,7 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
}
tx_queue = &channel->tx_queue[0];
- rc = ef100_enqueue_skb(tx_queue, skb);
+ rc = __ef100_enqueue_skb(tx_queue, skb, efv);
if (rc == 0)
return NETDEV_TX_OK;
@@ -239,13 +256,14 @@ int ef100_netdev_event(struct notifier_block *this,
struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
+ if (efx->net_dev == net_dev &&
+ (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
ef100_update_name(efx);
return NOTIFY_DONE;
}
-int ef100_register_netdev(struct efx_nic *efx)
+static int ef100_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
int rc;
@@ -271,7 +289,7 @@ int ef100_register_netdev(struct efx_nic *efx)
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
- efx->state = STATE_READY;
+ efx->state = STATE_NET_DOWN;
rtnl_unlock();
efx_init_mcdi_logging(efx);
@@ -283,11 +301,123 @@ fail_locked:
return rc;
}
-void ef100_unregister_netdev(struct efx_nic *efx)
+static void ef100_unregister_netdev(struct efx_nic *efx)
{
if (efx_dev_registered(efx)) {
efx_fini_mcdi_logging(efx);
- efx->state = STATE_UNINIT;
+ efx->state = STATE_PROBED;
unregister_netdev(efx->net_dev);
}
}
+
+void ef100_remove_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+
+ if (!efx->net_dev)
+ return;
+
+ rtnl_lock();
+ dev_close(efx->net_dev);
+ rtnl_unlock();
+
+ unregister_netdevice_notifier(&efx->netdev_notifier);
+#if defined(CONFIG_SFC_SRIOV)
+ if (!efx->type->is_vf)
+ efx_ef100_pci_sriov_disable(efx, true);
+#endif
+
+ ef100_unregister_netdev(efx);
+
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_tc(efx);
+#endif
+
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+ efx_fini_channels(efx);
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+
+ free_netdev(efx->net_dev);
+ efx->net_dev = NULL;
+ efx->state = STATE_PROBED;
+}
+
+int ef100_probe_netdev(struct efx_probe_data *probe_data)
+{
+ struct efx_nic *efx = &probe_data->efx;
+ struct efx_probe_data **probe_ptr;
+ struct net_device *net_dev;
+ int rc;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+ pci_info(efx->pci_dev, "No network port on this PCI function");
+ return 0;
+ }
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
+ SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
+
+ net_dev->features |= efx->type->offload_features;
+ net_dev->hw_features |= efx->type->offload_features;
+ net_dev->hw_enc_features |= efx->type->offload_features;
+ net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ netif_set_tso_max_segs(net_dev,
+ ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+ efx->mdio.dev = net_dev;
+
+ rc = efx_ef100_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail;
+
+ rc = ef100_phy_probe(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ down_write(&efx->filter_sem);
+ rc = ef100_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ goto fail;
+
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+
+ /* Don't fail init if RSS setup doesn't work. */
+ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+
+ rc = ef100_register_netdev(efx);
+ if (rc)
+ goto fail;
+
+ if (!efx->type->is_vf) {
+ rc = ef100_probe_netdev_pf(efx);
+ if (rc)
+ goto fail;
+ }
+
+ efx->netdev_notifier.notifier_call = ef100_netdev_event;
+ rc = register_netdevice_notifier(&efx->netdev_notifier);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to register netdevice notifier, rc=%d\n", rc);
+ goto fail;
+ }
+
+fail:
+ return rc;
+}
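
Because ef100_probe_netdev() above sizes the netdev private area as sizeof(probe_data) and stores only a pointer to the probe data in it (*probe_ptr = probe_data), the plain netdev_priv() calls elsewhere in the sfc hunks become efx_netdev_priv(). A plausible shape for that accessor, inferred from the layout set up here rather than quoted from the source:

	/* Assumed sketch: one extra dereference through the probe-data pointer
	 * stored in netdev_priv(); the real helper lives in the sfc headers.
	 */
	static inline struct efx_nic *efx_netdev_priv_sketch(struct net_device *dev)
	{
		struct efx_probe_data **probe_ptr = netdev_priv(dev);

		return &(*probe_ptr)->efx;
	}
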
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h
index d40abb7cc086..86bf985e0951 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.h
+++ b/drivers/net/ethernet/sfc/ef100_netdev.h
@@ -10,8 +10,13 @@
*/
#include <linux/netdevice.h>
+#include "ef100_rep.h"
+netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
+ struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct efx_rep *efv);
int ef100_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
-int ef100_register_netdev(struct efx_nic *efx);
-void ef100_unregister_netdev(struct efx_nic *efx);
+int ef100_probe_netdev(struct efx_probe_data *probe_data);
+void ef100_remove_netdev(struct efx_probe_data *probe_data);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index b2536d2c218a..8061efdaf82c 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -24,6 +24,8 @@
#include "ef100_tx.h"
#include "ef100_sriov.h"
#include "ef100_netdev.h"
+#include "tc.h"
+#include "mae.h"
#include "rx_common.h"
#define EF100_MAX_VIS 4096
@@ -148,7 +150,7 @@ static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
-static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
+int efx_ef100_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -327,7 +329,7 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ef100_phy_probe(struct efx_nic *efx)
+int ef100_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
int rc;
@@ -365,7 +367,7 @@ static int ef100_phy_probe(struct efx_nic *efx)
return 0;
}
-static int ef100_filter_table_probe(struct efx_nic *efx)
+int ef100_filter_table_probe(struct efx_nic *efx)
{
return efx_mcdi_filter_table_probe(efx, true);
}
@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
{
int rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
- if (rc) {
- efx_mcdi_filter_table_down(efx);
- return rc;
- }
+ if (rc)
+ goto fail_unspec;
rc = efx_mcdi_filter_add_vlan(efx, 0);
- if (rc) {
- efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
- efx_mcdi_filter_table_down(efx);
- }
+ if (rc)
+ goto fail_vlan0;
+ /* Drop the lock: we've finished altering table existence, and
+ * filter insertion will need to take the lock for read.
+ */
+ up_write(&efx->filter_sem);
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_tc_insert_rep_filters(efx);
+ /* Rep filter failure is nonfatal */
+ if (rc)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to insert representor filters, rc %d\n",
+ rc);
+#endif
+ return 0;
+fail_vlan0:
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+fail_unspec:
+ efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
return rc;
}
static void ef100_filter_table_down(struct efx_nic *efx)
{
+#ifdef CONFIG_SFC_SRIOV
+ efx_tc_remove_rep_filters(efx);
+#endif
+ down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
}
/* Other
@@ -704,178 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
-/* NIC level access functions
- */
-#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
- NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
- NETIF_F_HW_VLAN_CTAG_TX)
-
-const struct efx_nic_type ef100_pf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = false,
- .probe = ef100_probe_pf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
-
- .check_caps = ef100_check_caps,
-
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
-#endif
-
- .get_phys_port_id = efx_ef100_get_phys_port_id,
-
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
- .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .reconfigure_port = efx_mcdi_port_reconfigure,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
#ifdef CONFIG_SFC_SRIOV
- .sriov_configure = efx_ef100_sriov_configure,
-#endif
+static int efx_ef100_get_base_mport(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ u32 selector, id;
+ int rc;
- /* Per-type bar/size configuration not used on ef100. Location of
- * registers is defined by extended capabilities.
+ /* Construct mport selector for "physical network port" */
+ efx_mae_mport_wire(efx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &id);
+ if (rc)
+ return rc;
+ /* The ID should always fit in 16 bits, because that's how wide the
+ * corresponding fields in the RX prefix & TX override descriptor are
*/
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
-const struct efx_nic_type ef100_vf_nic_type = {
- .revision = EFX_REV_EF100,
- .is_vf = true,
- .probe = ef100_probe_vf,
- .offload_features = EF100_OFFLOAD_FEATURES,
- .mcdi_max_ver = 2,
- .mcdi_request = ef100_mcdi_request,
- .mcdi_poll_response = ef100_mcdi_poll_response,
- .mcdi_read_response = ef100_mcdi_read_response,
- .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
- .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
- .irq_enable_master = efx_port_dummy_op_void,
- .irq_test_generate = efx_ef100_irq_test_generate,
- .irq_disable_non_ev = efx_port_dummy_op_void,
- .push_irq_moderation = efx_channel_dummy_op_void,
- .min_interrupt_mode = EFX_INT_MODE_MSIX,
- .map_reset_reason = ef100_map_reset_reason,
- .map_reset_flags = ef100_map_reset_flags,
- .reset = ef100_reset,
- .check_caps = ef100_check_caps,
- .ev_probe = ef100_ev_probe,
- .ev_init = ef100_ev_init,
- .ev_fini = efx_mcdi_ev_fini,
- .ev_remove = efx_mcdi_ev_remove,
- .irq_handle_msi = ef100_msi_interrupt,
- .ev_process = ef100_ev_process,
- .ev_read_ack = ef100_ev_read_ack,
- .ev_test_generate = efx_ef100_ev_test_generate,
- .tx_probe = ef100_tx_probe,
- .tx_init = ef100_tx_init,
- .tx_write = ef100_tx_write,
- .tx_enqueue = ef100_enqueue_skb,
- .rx_probe = efx_mcdi_rx_probe,
- .rx_init = efx_mcdi_rx_init,
- .rx_remove = efx_mcdi_rx_remove,
- .rx_write = ef100_rx_write,
- .rx_packet = __ef100_rx_packet,
- .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
- .fini_dmaq = efx_fini_dmaq,
- .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
- .filter_table_probe = ef100_filter_table_up,
- .filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = ef100_filter_table_down,
- .filter_insert = efx_mcdi_filter_insert,
- .filter_remove_safe = efx_mcdi_filter_remove_safe,
- .filter_get_safe = efx_mcdi_filter_get_safe,
- .filter_clear_rx = efx_mcdi_filter_clear_rx,
- .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
-#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+ if (id >> 16)
+ netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
+ id);
+ nic_data->base_mport = id;
+ nic_data->have_mport = true;
+ return 0;
+}
#endif
- .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
- .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
- .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
- .rx_hash_key_size = 40,
- .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
- .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
- .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
- .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
-
- .reconfigure_mac = ef100_reconfigure_mac,
- .test_nvram = efx_new_mcdi_nvram_test_all,
- .describe_stats = ef100_describe_stats,
- .start_stats = efx_mcdi_mac_start_stats,
- .update_stats = ef100_update_stats,
- .pull_stats = efx_mcdi_mac_pull_stats,
- .stop_stats = efx_mcdi_mac_stop_stats,
-
- .mem_bar = NULL,
- .mem_map_size = NULL,
-
-};
-
static int compare_versions(const char *a, const char *b)
{
int a_major, a_minor, a_point, a_patch;
@@ -1077,8 +952,7 @@ static int ef100_check_design_params(struct efx_nic *efx)
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
- netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
- total_len);
+ pci_dbg(efx->pci_dev, "%u bytes of design parameters\n", total_len);
while (offset < total_len) {
efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
@@ -1117,9 +991,9 @@ out:
static int ef100_probe_main(struct efx_nic *efx)
{
unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
- struct net_device *net_dev = efx->net_dev;
struct ef100_nic_data *nic_data;
char fw_version[32];
+ u32 priv_mask = 0;
int i, rc;
if (WARN_ON(bar_size == 0))
@@ -1130,24 +1004,18 @@ static int ef100_probe_main(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
nic_data->efx = efx;
- net_dev->features |= efx->type->offload_features;
- net_dev->hw_features |= efx->type->offload_features;
- net_dev->hw_enc_features |= efx->type->offload_features;
- net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
+ efx->max_vis = EF100_MAX_VIS;
/* Populate design-parameter defaults */
nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- netif_set_tso_max_segs(net_dev,
- ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
+
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unsupported design parameters\n");
+ pci_err(efx->pci_dev, "Unsupported design parameters\n");
goto fail;
}
@@ -1184,12 +1052,6 @@ static int ef100_probe_main(struct efx_nic *efx)
/* Post-IO section. */
rc = efx_mcdi_init(efx);
- if (!rc && efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
- netif_info(efx, probe, efx->net_dev,
- "No network port on this PCI function");
- rc = -ENODEV;
- }
if (rc)
goto fail;
/* Reset (most) configuration for this function */
@@ -1205,67 +1067,43 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_ef100_init_datapath_caps(efx);
- if (rc < 0)
- goto fail;
-
- efx->max_vis = EF100_MAX_VIS;
-
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail;
efx->port_num = rc;
efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
- netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);
+ pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version);
+
+ rc = efx_mcdi_get_privilege_mask(efx, &priv_mask);
+ if (rc) /* non-fatal, and priv_mask will still be 0 */
+ pci_info(efx->pci_dev,
+ "Failed to get privilege mask from FW, rc %d\n", rc);
+ nic_data->grp_mae = !!(priv_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE);
if (compare_versions(fw_version, "1.1.0.1000") < 0) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
+ pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
rc = -EINVAL;
goto fail;
}
if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
- netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
+ pci_info(efx->pci_dev, "Firmware uses unsolicited-event credits\n");
rc = -EINVAL;
goto fail;
}
- rc = ef100_phy_probe(efx);
- if (rc)
- goto fail;
-
- down_write(&efx->filter_sem);
- rc = ef100_filter_table_probe(efx);
- up_write(&efx->filter_sem);
- if (rc)
- goto fail;
-
- netdev_rss_key_fill(efx->rss_context.rx_hash_key,
- sizeof(efx->rss_context.rx_hash_key));
-
- /* Don't fail init if RSS setup doesn't work. */
- efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
-
- rc = ef100_register_netdev(efx);
- if (rc)
- goto fail;
-
return 0;
fail:
return rc;
}
-int ef100_probe_pf(struct efx_nic *efx)
+int ef100_probe_netdev_pf(struct efx_nic *efx)
{
+ struct ef100_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
- struct ef100_nic_data *nic_data;
- int rc = ef100_probe_main(efx);
-
- if (rc)
- goto fail;
+ int rc;
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr);
if (rc)
goto fail;
@@ -1273,6 +1111,34 @@ int ef100_probe_pf(struct efx_nic *efx)
eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
+ if (!nic_data->grp_mae)
+ return 0;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_init_struct_tc(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef100_get_base_mport(efx);
+ if (rc) {
+ netif_warn(efx, probe, net_dev,
+ "Failed to probe base mport rc %d; representors will not function\n",
+ rc);
+ }
+
+ rc = efx_init_tc(efx);
+ if (rc) {
+ /* Either we don't have an MAE at all (i.e. legacy v-switching),
+ * or we do but we failed to probe it. In the latter case, we
+ * may not have set up default rules, in which case we won't be
+ * able to pass any traffic. However, we don't fail the probe,
+ * because the user might need to use the netdevice to apply
+ * configuration changes to fix whatever's wrong with the MAE.
+ */
+ netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
+ rc);
+ }
+#endif
return 0;
fail:
@@ -1288,14 +1154,6 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
- ef100_unregister_netdev(efx);
-
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
- efx_fini_channels(efx);
- kfree(efx->phy_data);
- efx->phy_data = NULL;
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1303,3 +1161,175 @@ void ef100_remove(struct efx_nic *efx)
kfree(nic_data);
efx->nic_data = NULL;
}
+
+/* NIC level access functions
+ */
+#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
+ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
+ NETIF_F_HW_VLAN_CTAG_TX)
+
+const struct efx_nic_type ef100_pf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = false,
+ .probe = ef100_probe_main,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+
+ .check_caps = ef100_check_caps,
+
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .get_phys_port_id = efx_ef100_get_phys_port_id,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef100_sriov_configure,
+#endif
+
+ /* Per-type bar/size configuration not used on ef100. Location of
+ * registers is defined by extended capabilities.
+ */
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+const struct efx_nic_type ef100_vf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = true,
+ .probe = ef100_probe_vf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+ .check_caps = ef100_check_caps,
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index e799688d5264..0295933145fa 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -8,6 +8,8 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
+#ifndef EFX_EF100_NIC_H
+#define EFX_EF100_NIC_H
#include "net_driver.h"
#include "nic_common.h"
@@ -15,7 +17,7 @@
extern const struct efx_nic_type ef100_pf_nic_type;
extern const struct efx_nic_type ef100_vf_nic_type;
-int ef100_probe_pf(struct efx_nic *efx);
+int ef100_probe_netdev_pf(struct efx_nic *efx);
int ef100_probe_vf(struct efx_nic *efx);
void ef100_remove(struct efx_nic *efx);
@@ -70,6 +72,9 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT];
+ u32 base_mport;
+ bool have_mport; /* base_mport was populated successfully */
+ bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
u16 tso_max_frames;
@@ -78,3 +83,9 @@ struct ef100_nic_data {
#define efx_ef100_has_cap(caps, flag) \
(!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
+
+int efx_ef100_init_datapath_caps(struct efx_nic *efx);
+int ef100_phy_probe(struct efx_nic *efx);
+int ef100_filter_table_probe(struct efx_nic *efx);
+
+#endif /* EFX_EF100_NIC_H */
diff --git a/drivers/net/ethernet/sfc/ef100_regs.h b/drivers/net/ethernet/sfc/ef100_regs.h
index 710bbdb19885..982b6ab1eb62 100644
--- a/drivers/net/ethernet/sfc/ef100_regs.h
+++ b/drivers/net/ethernet/sfc/ef100_regs.h
@@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
- * Copyright 2019-2020 Xilinx Inc.
+ * Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -181,12 +181,6 @@
/* RHEAD_BASE_EVENT */
#define ESF_GZ_E_TYPE_LBN 60
#define ESF_GZ_E_TYPE_WIDTH 4
-#define ESE_GZ_EF100_EV_DRIVER 5
-#define ESE_GZ_EF100_EV_MCDI 4
-#define ESE_GZ_EF100_EV_CONTROL 3
-#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2
-#define ESE_GZ_EF100_EV_TX_COMPLETION 1
-#define ESE_GZ_EF100_EV_RX_PKTS 0
#define ESF_GZ_EV_EVQ_PHASE_LBN 59
#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1
#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64
@@ -369,14 +363,18 @@
#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16
-#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128
-#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16
+#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN 128
+#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96
#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32
#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64
#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32
-#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32
-#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 34
+#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 30
+#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_LBN 33
+#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_WIDTH 1
+#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN 32
+#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_WIDTH 1
#define ESF_GZ_RX_PREFIX_CLASS_LBN 16
#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15
@@ -454,12 +452,8 @@
#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_M2M_RSVD_LBN 120
#define ESF_GZ_M2M_RSVD_WIDTH 2
-#define ESF_GZ_M2M_ADDR_SPC_LBN 108
-#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12
-#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86
-#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22
-#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84
-#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_M2M_ADDR_SPC_ID_LBN 84
+#define ESF_GZ_M2M_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64
#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20
#define ESF_GZ_M2M_ADDR_LBN 0
@@ -492,12 +486,8 @@
#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_TX_SEG_RSVD2_LBN 120
#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2
-#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108
-#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12
-#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86
-#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22
-#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84
-#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2
+#define ESF_GZ_TX_SEG_ADDR_SPC_ID_LBN 84
+#define ESF_GZ_TX_SEG_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_TX_SEG_RSVD_LBN 80
#define ESF_GZ_TX_SEG_RSVD_WIDTH 4
#define ESF_GZ_TX_SEG_LEN_LBN 64
@@ -583,6 +573,12 @@
#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124
+/* Enum D2VIO_MSG_OP */
+#define ESE_GZ_QUE_JBDNE 3
+#define ESE_GZ_QUE_EVICT 2
+#define ESE_GZ_QUE_EMPTY 1
+#define ESE_GZ_NOP 0
+
/* Enum DESIGN_PARAMS */
#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17
#define ESE_EF100_DP_GZ_VI_STRIDES 16
@@ -630,6 +626,19 @@
#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256
#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4
+/* Enum RH_DSC_TYPE */
+#define ESE_GZ_TX_TOMB 0xF
+#define ESE_GZ_TX_VIO 0xE
+#define ESE_GZ_TX_TSO_OVRRD 0x8
+#define ESE_GZ_TX_D2CMP 0x7
+#define ESE_GZ_TX_DATA 0x6
+#define ESE_GZ_TX_D2M 0x5
+#define ESE_GZ_TX_M2M 0x4
+#define ESE_GZ_TX_SEG 0x3
+#define ESE_GZ_TX_TSO 0x2
+#define ESE_GZ_TX_OVRRD 0x1
+#define ESE_GZ_TX_SEND 0x0
+
/* Enum RH_HCLASS_L2_CLASS */
#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1
#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0
@@ -666,6 +675,25 @@
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0
+/* Enum SF_CTL_EVENT_SUBTYPE */
+#define ESE_GZ_EF100_CTL_EV_EVQ_TIMEOUT 0x3
+#define ESE_GZ_EF100_CTL_EV_FLUSH 0x2
+#define ESE_GZ_EF100_CTL_EV_TIME_SYNC 0x1
+#define ESE_GZ_EF100_CTL_EV_UNSOL_OVERFLOW 0x0
+
+/* Enum SF_EVENT_TYPE */
+#define ESE_GZ_EF100_EV_DRIVER 0x5
+#define ESE_GZ_EF100_EV_MCDI 0x4
+#define ESE_GZ_EF100_EV_CONTROL 0x3
+#define ESE_GZ_EF100_EV_TX_TIMESTAMP 0x2
+#define ESE_GZ_EF100_EV_TX_COMPLETION 0x1
+#define ESE_GZ_EF100_EV_RX_PKTS 0x0
+
+/* Enum SF_EW_EVENT_TYPE */
+#define ESE_GZ_EF100_EWEV_VIRTQ_DESC 0x2
+#define ESE_GZ_EF100_EWEV_TXQ_DESC 0x1
+#define ESE_GZ_EF100_EWEV_64BIT 0x0
+
/* Enum TX_DESC_CSO_PARTIAL_EN */
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1
@@ -681,6 +709,15 @@
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1
#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0
+
+/* Enum VIRTIO_NET_HDR_F */
+#define ESE_GZ_NEEDS_CSUM 0x1
+
+/* Enum VIRTIO_NET_HDR_GSO */
+#define ESE_GZ_TCPV6 0x4
+#define ESE_GZ_UDP 0x3
+#define ESE_GZ_TCPV4 0x1
+#define ESE_GZ_NONE 0x0
/**************************************************************************/
#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
new file mode 100644
index 000000000000..73ae4656a6e7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "ef100_rep.h"
+#include "ef100_netdev.h"
+#include "ef100_nic.h"
+#include "mae.h"
+#include "rx_common.h"
+
+#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
+
+#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
+static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
+ unsigned int i)
+{
+ efv->parent = efx;
+ efv->idx = i;
+ INIT_LIST_HEAD(&efv->list);
+ efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efv->dflt.acts.list);
+ INIT_LIST_HEAD(&efv->rx_list);
+ spin_lock_init(&efv->rx_lock);
+ efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW;
+ return 0;
+}
+
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&efv->napi);
+ return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ napi_disable(&efv->napi);
+ netif_napi_del(&efv->napi);
+ return 0;
+}
+
+static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ netdev_tx_t rc;
+
+ /* __ef100_hard_start_xmit() will always return success even in the
+ * case of TX drops, where it will increment efx's tx_dropped. The
+ * efv stats really only count attempted TX, not success/failure.
+ */
+ atomic64_inc(&efv->stats.tx_packets);
+ atomic64_add(skb->len, &efv->stats.tx_bytes);
+ netif_tx_lock(efx->net_dev);
+ rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
+ netif_tx_unlock(efx->net_dev);
+ return rc;
+}
+
+static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ struct ef100_nic_data *nic_data;
+
+ nic_data = efx->nic_data;
+ /* nic_data->port_id is a u8[] */
+ ppid->id_len = sizeof(nic_data->port_id);
+ memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
+ return 0;
+}
+
+static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+ struct efx_nic *efx = efv->parent;
+ struct ef100_nic_data *nic_data;
+ int ret;
+
+ nic_data = efx->nic_data;
+ ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
+ nic_data->pf_index, efv->idx);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void efx_ef100_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+
+ stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
+ stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
+ stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
+ stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
+ stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
+ stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
+}
+
+static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+ .ndo_open = efx_ef100_rep_open,
+ .ndo_stop = efx_ef100_rep_close,
+ .ndo_start_xmit = efx_ef100_rep_xmit,
+ .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
+ .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
+ .ndo_get_stats64 = efx_ef100_rep_get_stats64,
+};
+
+static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
+}
+
+static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ return efv->msg_enable;
+}
+
+static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
+ u32 msg_enable)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ efv->msg_enable = msg_enable;
+}
+
+static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ ring->rx_max_pending = U32_MAX;
+ ring->rx_pending = efv->rx_pring_size;
+}
+
+static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
+ return -EINVAL;
+
+ efv->rx_pring_size = ring->rx_pending;
+ return 0;
+}
+
+static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
+ .get_drvinfo = efx_ef100_rep_get_drvinfo,
+ .get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
+ .set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
+ .get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
+ .set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
+};
+
+static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
+ unsigned int i)
+{
+ struct net_device *net_dev;
+ struct efx_rep *efv;
+ int rc;
+
+ net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
+ if (!net_dev)
+ return ERR_PTR(-ENOMEM);
+
+ efv = netdev_priv(net_dev);
+ rc = efx_ef100_rep_init_struct(efx, efv, i);
+ if (rc)
+ goto fail1;
+ efv->net_dev = net_dev;
+ rtnl_lock();
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_add_tail(&efv->list, &efx->vf_reps);
+ spin_unlock_bh(&efx->vf_reps_lock);
+ if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
+ netif_device_attach(net_dev);
+ netif_carrier_on(net_dev);
+ } else {
+ netif_carrier_off(net_dev);
+ netif_tx_stop_all_queues(net_dev);
+ }
+ rtnl_unlock();
+
+ net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
+ net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
+ net_dev->min_mtu = EFX_MIN_MTU;
+ net_dev->max_mtu = EFX_MAX_MTU;
+ net_dev->features |= NETIF_F_LLTX;
+ net_dev->hw_features |= NETIF_F_LLTX;
+ return efv;
+fail1:
+ free_netdev(net_dev);
+ return ERR_PTR(rc);
+}
+
+static int efx_ef100_configure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+ u32 selector;
+ int rc;
+
+ efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
+ /* Construct mport selector for corresponding VF */
+ efx_mae_mport_vf(efx, efv->idx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
+ /* mport label should fit in 16 bits */
+ WARN_ON(efv->mport >> 16);
+
+ return efx_tc_configure_default_rule_rep(efv);
+}
+
+static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ efx_tc_deconfigure_default_rule(efx, &efv->dflt);
+}
+
+static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ rtnl_lock();
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_del(&efv->list);
+ spin_unlock_bh(&efx->vf_reps_lock);
+ rtnl_unlock();
+ synchronize_rcu();
+ free_netdev(efv->net_dev);
+}
+
+int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
+{
+ struct efx_rep *efv;
+ int rc;
+
+ efv = efx_ef100_rep_create_netdev(efx, i);
+ if (IS_ERR(efv)) {
+ rc = PTR_ERR(efv);
+ pci_err(efx->pci_dev,
+ "Failed to create representor for VF %d, rc %d\n", i,
+ rc);
+ return rc;
+ }
+ rc = efx_ef100_configure_rep(efv);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "Failed to configure representor for VF %d, rc %d\n",
+ i, rc);
+ goto fail1;
+ }
+ rc = register_netdev(efv->net_dev);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "Failed to register representor for VF %d, rc %d\n",
+ i, rc);
+ goto fail2;
+ }
+ pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
+ efv->net_dev->name);
+ return 0;
+fail2:
+ efx_ef100_deconfigure_rep(efv);
+fail1:
+ efx_ef100_rep_destroy_netdev(efv);
+ return rc;
+}
+
+void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
+{
+ struct net_device *rep_dev;
+
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ return;
+ netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
+ unregister_netdev(rep_dev);
+ efx_ef100_deconfigure_rep(efv);
+ efx_ef100_rep_destroy_netdev(efv);
+}
+
+void efx_ef100_fini_vfreps(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_rep *efv, *next;
+
+ if (!nic_data->grp_mae)
+ return;
+
+ list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
+ efx_ef100_vfrep_destroy(efx, efv);
+}
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+ struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+ unsigned int read_index;
+ struct list_head head;
+ struct sk_buff *skb;
+ bool need_resched;
+ int spent = 0;
+
+ INIT_LIST_HEAD(&head);
+ /* Grab up to 'weight' pending SKBs */
+ spin_lock_bh(&efv->rx_lock);
+ read_index = efv->write_index;
+ while (spent < weight && !list_empty(&efv->rx_list)) {
+ skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+ list_del(&skb->list);
+ list_add_tail(&skb->list, &head);
+ spent++;
+ }
+ spin_unlock_bh(&efv->rx_lock);
+ /* Receive them */
+ netif_receive_skb_list(&head);
+ if (spent < weight)
+ if (napi_complete_done(napi, spent)) {
+ spin_lock_bh(&efv->rx_lock);
+ efv->read_index = read_index;
+ /* If write_index advanced while we were doing the
+ * RX, then storing our read_index won't re-prime the
+ * fake-interrupt. In that case, we need to schedule
+ * NAPI again to consume the additional packet(s).
+ */
+ need_resched = efv->write_index != read_index;
+ spin_unlock_bh(&efv->rx_lock);
+ if (need_resched)
+ napi_schedule(&efv->napi);
+ }
+ return spent;
+}
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
+{
+ u8 *eh = efx_rx_buf_va(rx_buf);
+ struct sk_buff *skb;
+ bool primed;
+
+ /* Don't allow too many queued SKBs to build up, as they consume
+ * GFP_ATOMIC memory. If we overrun, just start dropping.
+ */
+ if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "nodesc-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+
+ skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
+ if (!skb) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "noskb-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+ memcpy(skb->data, eh, rx_buf->len);
+ __skb_put(skb, rx_buf->len);
+
+ skb_record_rx_queue(skb, 0); /* rep is single-queue */
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efv->net_dev);
+
+ skb_checksum_none_assert(skb);
+
+ atomic64_inc(&efv->stats.rx_packets);
+ atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
+
+ /* Add it to the rx list */
+ spin_lock_bh(&efv->rx_lock);
+ primed = efv->read_index == efv->write_index;
+ list_add_tail(&skb->list, &efv->rx_list);
+ efv->write_index++;
+ spin_unlock_bh(&efv->rx_lock);
+ /* Trigger rx work */
+ if (primed)
+ napi_schedule(&efv->napi);
+}
+
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
+{
+ struct efx_rep *efv, *out = NULL;
+
+ /* spinlock guards against list mutation while we're walking it;
+ * but caller must also hold rcu_read_lock() to ensure the netdev
+ * isn't freed after we drop the spinlock.
+ */
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_for_each_entry(efv, &efx->vf_reps, list)
+ if (efv->mport == mport) {
+ out = efv;
+ break;
+ }
+ spin_unlock_bh(&efx->vf_reps_lock);
+ return out;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
new file mode 100644
index 000000000000..070f700893c1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Handling for ef100 representor netdevs */
+#ifndef EF100_REP_H
+#define EF100_REP_H
+
+#include "net_driver.h"
+#include "tc.h"
+
+struct efx_rep_sw_stats {
+ atomic64_t rx_packets, tx_packets;
+ atomic64_t rx_bytes, tx_bytes;
+ atomic64_t rx_dropped, tx_errors;
+};
+
+/**
+ * struct efx_rep - Private data for an Efx representor
+ *
+ * @parent: the efx PF which manages this representor
+ * @net_dev: representor netdevice
+ * @msg_enable: log message enable flags
+ * @mport: m-port ID of corresponding VF
+ * @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
+ * @dflt: default-rule for MAE switching
+ * @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
+ * @stats: software traffic counters for netdev stats
+ */
+struct efx_rep {
+ struct efx_nic *parent;
+ struct net_device *net_dev;
+ u32 msg_enable;
+ u32 mport;
+ unsigned int idx;
+ unsigned int write_index, read_index;
+ unsigned int rx_pring_size;
+ struct efx_tc_flow_rule dflt;
+ struct list_head list;
+ struct list_head rx_list;
+ spinlock_t rx_lock;
+ struct napi_struct napi;
+ struct efx_rep_sw_stats stats;
+};
+
+int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
+void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
+void efx_ef100_fini_vfreps(struct efx_nic *efx);
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
+/* Returns the representor corresponding to a VF m-port, or NULL
+ * @mport is an m-port label, *not* an m-port ID!
+ * Caller must hold rcu_read_lock().
+ */
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
+#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 85207acf7dee..65bbe37753e6 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
void __ef100_rx_packet(struct efx_channel *channel)
{
- struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
+ channel->rx_pkt_index);
struct efx_nic *efx = channel->efx;
+ struct ef100_nic_data *nic_data;
u8 *eh = efx_rx_buf_va(rx_buf);
__wsum csum = 0;
+ u16 ing_port;
u32 *prefix;
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto out;
}
+ ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));
+
+ nic_data = efx->nic_data;
+
+ if (nic_data->have_mport && ing_port != nic_data->base_mport) {
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_rep *efv;
+
+ rcu_read_lock();
+ efv = efx_ef100_find_rep_by_mport(efx, ing_port);
+ if (efv) {
+ if (efv->net_dev->flags & IFF_UP)
+ efx_ef100_rep_rx_packet(efv, rx_buf);
+ rcu_read_unlock();
+ /* Representor Rx doesn't care about PF Rx buffer
+ * ownership, it just makes a copy. So, we are done
+ * with the Rx buffer from PF point of view and should
+ * free it.
+ */
+ goto free_rx_buffer;
+ }
+ rcu_read_unlock();
+#endif
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Unrecognised ing_port %04x (base %04x), dropping\n",
+ ing_port, nic_data->base_mport);
+ channel->n_rx_mport_bad++;
+ goto free_rx_buffer;
+ }
+
if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
++channel->n_rx_ip_hdr_chksum_err;
@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
}
if (channel->type->receive_skb) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
/* no support for special channels yet, so just discard */
WARN_ON_ONCE(1);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- goto out;
+ goto free_rx_buffer;
}
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+ goto out;
+free_rx_buffer:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
channel->rx_pkt_n_frags = 0;
}
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c
index 664578176bfe..94bdbfcb47e8 100644
--- a/drivers/net/ethernet/sfc/ef100_sriov.c
+++ b/drivers/net/ethernet/sfc/ef100_sriov.c
@@ -11,46 +11,62 @@
#include "ef100_sriov.h"
#include "ef100_nic.h"
+#include "ef100_rep.h"
static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
{
+ struct ef100_nic_data *nic_data = efx->nic_data;
struct pci_dev *dev = efx->pci_dev;
- int rc;
+ struct efx_rep *efv, *next;
+ int rc, i;
efx->vf_count = num_vfs;
rc = pci_enable_sriov(dev, num_vfs);
if (rc)
- goto fail;
+ goto fail1;
+ if (!nic_data->grp_mae)
+ return 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ rc = efx_ef100_vfrep_create(efx, i);
+ if (rc)
+ goto fail2;
+ }
return 0;
-fail:
+fail2:
+ list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
+ efx_ef100_vfrep_destroy(efx, efv);
+ pci_disable_sriov(dev);
+fail1:
netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
efx->vf_count = 0;
return rc;
}
-int efx_ef100_pci_sriov_disable(struct efx_nic *efx)
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
unsigned int vfs_assigned;
vfs_assigned = pci_vfs_assigned(dev);
- if (vfs_assigned) {
+ if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
"please detach them before disabling SR-IOV\n");
return -EBUSY;
}
- pci_disable_sriov(dev);
-
+ efx_ef100_fini_vfreps(efx);
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
return 0;
}
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs)
{
if (num_vfs == 0)
- return efx_ef100_pci_sriov_disable(efx);
+ return efx_ef100_pci_sriov_disable(efx, false);
else
return efx_ef100_pci_sriov_enable(efx, num_vfs);
}
diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h
index c48fccd46c57..8ffdf464dd1d 100644
--- a/drivers/net/ethernet/sfc/ef100_sriov.h
+++ b/drivers/net/ethernet/sfc/ef100_sriov.h
@@ -11,4 +11,4 @@
#include "net_driver.h"
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs);
-int efx_ef100_pci_sriov_disable(struct efx_nic *efx);
+int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 26ef51d6b542..102ddc7e206a 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -254,7 +254,8 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb,
- unsigned int segment_count)
+ unsigned int segment_count,
+ struct efx_rep *efv)
{
unsigned int old_write_count = tx_queue->write_count;
unsigned int new_write_count = old_write_count;
@@ -272,6 +273,20 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
else
next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
+ if (unlikely(efv)) {
+ /* Create TX override descriptor */
+ write_ptr = new_write_count & tx_queue->ptr_mask;
+ txd = ef100_tx_desc(tx_queue, write_ptr);
+ ++new_write_count;
+
+ tx_queue->packet_write_count = new_write_count;
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport,
+ ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1);
+ nr_descs--;
+ }
+
/* if it's a raw write (such as XDP) then always SEND single frames */
if (!skb)
nr_descs = 1;
@@ -306,6 +321,9 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
/* if it's a raw write (such as XDP) then always SEND */
next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
ESE_GZ_TX_DESC_TYPE_SEND;
+ /* mark as an EFV buffer if applicable */
+ if (unlikely(efv))
+ buffer->flags |= EFX_TX_BUF_EFV;
} while (new_write_count != tx_queue->insert_count);
@@ -324,7 +342,7 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
- ef100_tx_make_descriptors(tx_queue, NULL, 0);
+ ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
ef100_tx_push_buffers(tx_queue);
}
@@ -351,6 +369,12 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
*/
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
+ return __ef100_enqueue_skb(tx_queue, skb, NULL);
+}
+
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv)
+{
unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;
bool xmit_more = netdev_xmit_more();
@@ -376,16 +400,64 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return 0;
}
+ if (unlikely(efv)) {
+ struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ /* Drop representor packets if the queue is stopped.
+ * We currently don't assert backoff to representors so this is
+ * to make sure representor traffic can't starve the main
+ * net device.
+ * And, of course, if there are no TX descriptors left.
+ */
+ if (netif_tx_queue_stopped(tx_queue->core_txq) ||
+ unlikely(efx_tx_buffer_in_use(buffer))) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ /* Also drop representor traffic if it could cause us to
+ * stop the queue. If we assert backoff and we haven't
+ * received traffic on the main net device recently then the
+ * TX watchdog can go off erroneously.
+ */
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ struct efx_tx_queue *txq2;
+
+ /* Refresh cached fill level and re-check */
+ efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
+ fill_level += efx_tx_max_skb_descs(efx);
+ if (fill_level > efx->txq_stop_thresh) {
+ atomic64_inc(&efv->stats.tx_errors);
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV;
+ tx_queue->insert_count++;
+ }
+
/* Map for DMA and create descriptors */
rc = efx_tx_map_data(tx_queue, skb, segments);
if (rc)
goto err;
- ef100_tx_make_descriptors(tx_queue, skb, segments);
+ ef100_tx_make_descriptors(tx_queue, skb, segments, efv);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
struct efx_tx_queue *txq2;
+ /* Because of checks above, representor traffic should
+ * not be able to stop the queue.
+ */
+ WARN_ON(efv);
+
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
@@ -404,8 +476,12 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* If xmit_more then we don't need to push the doorbell, unless there
* are 256 descriptors already queued in which case we have to push to
* ensure we never push more than 256 at once.
+ *
+ * Always push for representor traffic, and don't account it to parent
+ * PF netdevice's BQL.
*/
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
+ if (unlikely(efv) ||
+ __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index ddc4b98fa6db..e9e11540fcde 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -13,6 +13,7 @@
#define EFX_EF100_TX_H
#include "net_driver.h"
+#include "ef100_rep.h"
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
@@ -22,4 +23,6 @@ unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ struct efx_rep *efv);
#endif
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 92550c7e85ce..9aae0d8b713f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -501,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
}
rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
@@ -539,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
if (vf->efx) {
/* VF cannot use the vport_id that the PF created */
rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
vf->efx->type->filter_table_probe(vf->efx);
- up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev);
efx_device_attach_if_not_resetting(vf->efx);
}
@@ -580,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_net_stop(vf->efx->net_dev);
mutex_lock(&vf->efx->mac_lock);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
@@ -654,7 +647,6 @@ restore_filters:
if (rc2)
goto reset_nic_up_write;
- up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock);
rc2 = efx_net_open(vf->efx->net_dev);
@@ -666,10 +658,8 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx) {
- up_write(&vf->efx->filter_sem);
+ if (vf->efx)
mutex_unlock(&vf->efx->mac_lock);
- }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 5a772354da83..153d68e29b8b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,14 +106,6 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
- } while (0)
-
/**************************************************************************
*
* Port handling
@@ -378,6 +370,8 @@ static int efx_probe_all(struct efx_nic *efx)
if (rc)
goto fail5;
+ efx->state = STATE_NET_DOWN;
+
return 0;
fail5:
@@ -498,7 +492,7 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
@@ -523,7 +517,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
@@ -544,6 +538,9 @@ int efx_net_open(struct net_device *net_dev)
efx_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev);
+ else
+ efx->state = STATE_NET_UP;
+
efx_selftest_async_start(efx);
return 0;
}
@@ -554,7 +551,7 @@ int efx_net_open(struct net_device *net_dev)
*/
int efx_net_stop(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
@@ -567,7 +564,7 @@ int efx_net_stop(struct net_device *net_dev)
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_add_vid)
return efx->type->vlan_rx_add_vid(efx, proto, vid);
@@ -577,7 +574,7 @@ static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid
static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->vlan_rx_kill_vid)
return efx->type->vlan_rx_kill_vid(efx, proto, vid);
@@ -646,7 +643,7 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -659,7 +656,7 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
u32 flags)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -681,7 +678,7 @@ static int efx_netdev_event(struct notifier_block *this,
if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
- efx_update_name(netdev_priv(net_dev));
+ efx_update_name(efx_netdev_priv(net_dev));
return NOTIFY_DONE;
}
@@ -720,8 +717,6 @@ static int efx_register_netdev(struct efx_nic *efx)
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
- efx->state = STATE_READY;
- smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
rc = -EIO;
@@ -748,6 +743,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_associate(efx);
+ efx->state = STATE_NET_DOWN;
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -777,7 +774,8 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (!efx->net_dev)
return;
- BUG_ON(netdev_priv(efx->net_dev) != efx);
+ if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
+ return;
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -845,7 +843,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
- BUG_ON(efx->state == STATE_READY);
+ WARN_ON(efx_net_active(efx->state));
efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
@@ -863,6 +861,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
+ struct efx_probe_data *probe_data;
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
@@ -887,10 +886,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
efx_fini_io(efx);
- netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+ pci_dbg(efx->pci_dev, "shutdown successful\n");
efx_fini_struct(efx);
free_netdev(efx->net_dev);
+ probe_data = container_of(efx, struct efx_probe_data, efx);
+ kfree(probe_data);
pci_disable_pcie_error_reporting(pci_dev);
};
@@ -1044,24 +1045,34 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
static int efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
+ struct efx_probe_data *probe_data, **probe_ptr;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
- /* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
- EFX_MAX_RX_QUEUES);
+ /* Allocate probe data and struct efx_nic */
+ probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
+ if (!probe_data)
+ return -ENOMEM;
+ probe_data->pci_dev = pci_dev;
+ efx = &probe_data->efx;
+
+ /* Allocate and initialise a struct net_device */
+ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
- efx = netdev_priv(net_dev);
+ probe_ptr = netdev_priv(net_dev);
+ *probe_ptr = probe_data;
+ efx->net_dev = net_dev;
efx->type = (const struct efx_nic_type *) entry->driver_data;
efx->fixed_features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
+ efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
@@ -1150,13 +1161,13 @@ static int efx_pm_freeze(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
- efx->state = STATE_UNINIT;
-
+ if (efx_net_active(efx->state)) {
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_disable_interrupts(efx);
+
+ efx->state = efx_freeze(efx->state);
}
rtnl_unlock();
@@ -1171,7 +1182,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock();
- if (efx->state != STATE_DISABLED) {
+ if (efx_frozen(efx->state)) {
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
@@ -1184,7 +1195,7 @@ static int efx_pm_thaw(struct device *dev)
efx_device_attach_if_not_resetting(efx);
- efx->state = STATE_READY;
+ efx->state = efx_thaw(efx->state);
efx->type->resume_wol(efx);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index c05a83da9e44..4239c7ece123 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -12,6 +12,7 @@
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
+#include "efx_common.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -206,6 +207,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
{
struct net_device *dev = efx->net_dev;
+ /* We must stop reps (which use our TX) before we stop ourselves. */
+ efx_detach_reps(efx);
+
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
@@ -217,8 +221,11 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
- if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+ if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
netif_device_attach(efx->net_dev);
+ if (efx->state == STATE_NET_UP)
+ efx_attach_reps(efx);
+ }
}
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
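The efx_net_active()/efx_frozen()/efx_recovering() predicates and the efx_freeze()/efx_thaw()/efx_recover()/efx_recovered() transitions used throughout these hunks are not defined in this diff. One plausible shape, assuming the old STATE_READY splits into STATE_NET_DOWN/STATE_NET_UP and FROZEN/RECOVERY become flag bits ORed into a base state (the enum values below are hypothetical):

	/* Hypothetical layout: base states plus flag bits for transient conditions. */
	enum nic_state {
		STATE_UNINIT = 0,
		STATE_PROBED,
		STATE_NET_DOWN,
		STATE_NET_UP,
		STATE_DISABLED,
		STATE_RECOVERY = 0x100,	/* ORed in during PCI error recovery */
		STATE_FROZEN   = 0x200,	/* ORed in while suspended */
	};

	static inline bool efx_net_active(enum nic_state state)
	{
		return state == STATE_NET_DOWN || state == STATE_NET_UP;
	}

	static inline bool efx_frozen(enum nic_state state)
	{
		return state & STATE_FROZEN;
	}

	static inline bool efx_recovering(enum nic_state state)
	{
		return state & STATE_RECOVERY;
	}

	static inline enum nic_state efx_freeze(enum nic_state state)
	{
		return state | STATE_FROZEN;
	}

	static inline enum nic_state efx_thaw(enum nic_state state)
	{
		return state & ~STATE_FROZEN;
	}

	static inline enum nic_state efx_recover(enum nic_state state)
	{
		return state | STATE_RECOVERY;
	}

	static inline enum nic_state efx_recovered(enum nic_state state)
	{
		return state & ~STATE_RECOVERY;
	}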
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index f6577e74d6e6..a929a1aaba92 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -24,6 +24,7 @@
#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"
+#include "ef100_rep.h"
static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
@@ -167,7 +168,7 @@ static void efx_mac_work(struct work_struct *data)
int efx_set_mac_address(struct net_device *net_dev, void *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
u8 old_addr[6];
@@ -202,7 +203,7 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -211,7 +212,7 @@ void efx_set_rx_mode(struct net_device *net_dev)
int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
@@ -285,7 +286,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx_check_disabled(efx);
@@ -600,7 +601,7 @@ void efx_stop_all(struct efx_nic *efx)
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
efx_nic_update_stats_atomic(efx, NULL, stats);
@@ -723,7 +724,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
@@ -898,7 +899,7 @@ static void efx_reset_work(struct work_struct *data)
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
- if (efx->state == STATE_READY)
+ if (efx_net_active(efx->state))
(void)efx_reset(efx, method);
rtnl_unlock();
@@ -908,7 +909,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
- if (efx->state == STATE_RECOVERY) {
+ if (efx_recovering(efx->state)) {
netif_dbg(efx, drv, efx->net_dev,
"recovering: skip scheduling %s reset\n",
RESET_TYPE(type));
@@ -943,7 +944,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (READ_ONCE(efx->state) != STATE_READY)
+ if (!efx_net_active(READ_ONCE(efx->state)))
return;
/* efx_process_channel() will no longer read events once a
@@ -978,8 +979,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {}
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
{
int rc = -ENOMEM;
@@ -998,7 +998,6 @@ int efx_init_struct(struct efx_nic *efx,
efx->state = STATE_UNINIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
- efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_ip_align =
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
@@ -1023,7 +1022,8 @@ int efx_init_struct(struct efx_nic *efx,
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
- efx->mdio.dev = net_dev;
+ spin_lock_init(&efx->vf_reps_lock);
+ INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@@ -1077,13 +1077,11 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
int rc;
efx->mem_bar = UINT_MAX;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
+ pci_dbg(pci_dev, "initialising I/O bar=%d\n", bar);
rc = pci_enable_device(pci_dev);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
+ pci_err(pci_dev, "failed to enable PCI device\n");
goto fail1;
}
@@ -1091,42 +1089,40 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
+ pci_err(efx->pci_dev, "could not find a suitable DMA mask\n");
goto fail2;
}
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long)dma_mask);
+ pci_dbg(efx->pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask);
efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
if (!efx->membase_phys) {
- netif_err(efx, probe, efx->net_dev,
- "ERROR: No BAR%d mapping from the BIOS. "
- "Try pci=realloc on the kernel command line\n", bar);
+ pci_err(efx->pci_dev,
+ "ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n",
+ bar);
rc = -ENODEV;
goto fail3;
}
rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR[%d] failed\n", bar);
+ pci_err(efx->pci_dev,
+ "request for memory BAR[%d] failed\n", bar);
rc = -EIO;
goto fail3;
}
efx->mem_bar = bar;
efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR[%d] at %llx+%x\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size);
+ pci_err(efx->pci_dev,
+ "could not map memory BAR[%d] at %llx+%x\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
+ pci_dbg(efx->pci_dev,
+ "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1142,7 +1138,7 @@ fail1:
void efx_fini_io(struct efx_nic *efx)
{
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+ pci_dbg(efx->pci_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
@@ -1217,13 +1213,15 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx->state = STATE_RECOVERY;
+ efx->state = efx_recover(efx->state);
efx->reset_pending = 0;
efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ if (efx_net_active(efx->state)) {
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+ }
status = PCI_ERS_RESULT_NEED_RESET;
} else {
@@ -1271,7 +1269,7 @@ static void efx_io_resume(struct pci_dev *pdev)
netif_err(efx, hw, efx->net_dev,
"efx_reset failed after PCI error (%d)\n", rc);
} else {
- efx->state = STATE_READY;
+ efx->state = efx_recovered(efx->state);
netif_dbg(efx, hw, efx->net_dev,
"Done resetting and resuming IO after PCI error.\n");
}
@@ -1357,7 +1355,7 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- struct efx_nic *efx = netdev_priv(dev);
+ struct efx_nic *efx = efx_netdev_priv(dev);
if (skb->encapsulation) {
if (features & NETIF_F_GSO_MASK)
@@ -1378,7 +1376,7 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_phys_port_id)
return efx->type->get_phys_port_id(efx, ppid);
@@ -1388,9 +1386,44 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (snprintf(name, len, "p%u", efx->port_num) >= len)
return -EINVAL;
return 0;
}
+
+void efx_detach_reps(struct efx_nic *efx)
+{
+ struct net_device *rep_dev;
+ struct efx_rep *efv;
+
+ ASSERT_RTNL();
+ netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n");
+ list_for_each_entry(efv, &efx->vf_reps, list) {
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ continue;
+ netif_carrier_off(rep_dev);
+ /* See efx_device_detach_sync() */
+ netif_tx_lock_bh(rep_dev);
+ netif_tx_stop_all_queues(rep_dev);
+ netif_tx_unlock_bh(rep_dev);
+ }
+}
+
+void efx_attach_reps(struct efx_nic *efx)
+{
+ struct net_device *rep_dev;
+ struct efx_rep *efv;
+
+ ASSERT_RTNL();
+ netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n");
+ list_for_each_entry(efv, &efx->vf_reps, list) {
+ rep_dev = efv->net_dev;
+ if (!rep_dev)
+ continue;
+ netif_tx_wake_all_queues(rep_dev);
+ netif_carrier_on(rep_dev);
+ }
+}
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 65513fd0cf6c..2c54dac3e662 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -14,8 +14,7 @@
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size);
void efx_fini_io(struct efx_nic *efx);
-int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
- struct net_device *net_dev);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev);
void efx_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -43,12 +42,11 @@ void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
int efx_reconfigure_port(struct efx_nic *efx);
-#define EFX_ASSERT_RESET_SERIALISED(efx) \
- do { \
- if ((efx->state == STATE_READY) || \
- (efx->state == STATE_RECOVERY) || \
- (efx->state == STATE_DISABLED)) \
- ASSERT_RTNL(); \
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx)->state != STATE_UNINIT && \
+ (efx)->state != STATE_PROBED) \
+ ASSERT_RTNL(); \
} while (0)
int efx_try_recovery(struct efx_nic *efx);
@@ -64,7 +62,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
@@ -113,4 +111,7 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len);
+
+void efx_detach_reps(struct efx_nic *efx);
+void efx_attach_reps(struct efx_nic *efx);
#endif
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 48506373721a..364323599f7b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -33,7 +33,7 @@
static int efx_ethtool_phys_id(struct net_device *net_dev,
enum ethtool_phys_id_state state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
enum efx_led_mode mode = EFX_LED_DEFAULT;
switch (state) {
@@ -55,13 +55,13 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
- return efx_nic_get_regs_len(netdev_priv(net_dev));
+ return efx_nic_get_regs_len(efx_netdev_priv(net_dev));
}
static void efx_ethtool_get_regs(struct net_device *net_dev,
struct ethtool_regs *regs, void *buf)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
regs->version = efx->type->revision;
efx_nic_get_regs(efx, buf);
@@ -101,7 +101,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
@@ -121,7 +121,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_channel *channel;
unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx;
@@ -163,7 +163,7 @@ efx_ethtool_get_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
@@ -177,7 +177,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
@@ -204,7 +204,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->get_wol(efx, wol);
}
@@ -212,14 +212,14 @@ static void efx_ethtool_get_wol(struct net_device *net_dev,
static int efx_ethtool_set_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->set_wol(efx, wol->wolopts);
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
struct ethtool_fec_stats *fec_stats)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->get_fec_stats)
efx->type->get_fec_stats(efx, fec_stats);
@@ -228,7 +228,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
struct ethtool_ts_info *ts_info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Software capabilities */
ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bd552c7dffcb..bc840ede3053 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
@@ -103,7 +104,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_mcdi_print_fwver(efx, info->fw_version,
@@ -113,14 +114,14 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->msg_enable;
}
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
efx->msg_enable = msg_enable;
}
@@ -128,7 +129,7 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
bool already_up;
int rc = -ENOMEM;
@@ -137,7 +138,7 @@ void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests)
goto fail;
- if (efx->state != STATE_READY) {
+ if (!efx_net_active(efx->state)) {
rc = -EBUSY;
goto out;
}
@@ -176,7 +177,7 @@ fail:
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
@@ -186,7 +187,7 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev,
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
int rc = 0;
@@ -441,7 +442,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
switch (string_set) {
case ETH_SS_STATS:
@@ -459,7 +460,7 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int i;
switch (string_set) {
@@ -487,7 +488,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
@@ -561,7 +562,7 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
mutex_lock(&efx->mac_lock);
@@ -584,7 +585,7 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
@@ -604,7 +605,7 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
int efx_ethtool_get_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -617,7 +618,7 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev,
int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct ethtool_fecparam *fecparam)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
mutex_lock(&efx->mac_lock);
@@ -809,7 +810,7 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
u32 rss_context = 0;
s32 rc = 0;
@@ -1127,7 +1128,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
@@ -1148,7 +1149,7 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->n_rx_channels == 1)
return 0;
@@ -1157,7 +1158,7 @@ u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
return efx->type->rx_hash_key_size;
}
@@ -1165,7 +1166,7 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->rx_pull_rss_config(efx);
@@ -1186,7 +1187,7 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
@@ -1205,7 +1206,7 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
int rc = 0;
@@ -1238,7 +1239,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
const u8 hfunc, u32 *rss_context,
bool delete)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
bool allocated = false;
int rc;
@@ -1300,7 +1301,7 @@ out_unlock:
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
rc = efx->type->map_reset_flags(flags);
@@ -1314,7 +1315,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
@@ -1327,7 +1328,7 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
int efx_ethtool_get_module_info(struct net_device *net_dev,
struct ethtool_modinfo *modinfo)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
int ret;
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/falcon/bitfield.h b/drivers/net/ethernet/sfc/falcon/bitfield.h
index 5eb178d0c149..78537a53009e 100644
--- a/drivers/net/ethernet/sfc/falcon/bitfield.h
+++ b/drivers/net/ethernet/sfc/falcon/bitfield.h
@@ -117,7 +117,7 @@ typedef union ef4_oword {
*
* ( element ) << 4
*
- * The result will contain the relevant bits filled in in the range
+ * The result will contain the relevant bits filled in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 2c91792cec01..c64623c2e80c 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -2711,7 +2711,7 @@ void ef4_farch_filter_table_remove(struct ef4_nic *efx)
enum ef4_farch_filter_table_id table_id;
for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2740,9 +2740,7 @@ int ef4_farch_filter_table_probe(struct ef4_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a381cf9ec4f3..a2c7139f2b32 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -679,7 +679,7 @@ union ef4_multicast_hash {
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
* @rx_ip_align: RX DMA address offset to have IP header aligned in
- * in accordance with NET_IP_ALIGN
+ * accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 40b2af8bfb81..4d928839d292 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -88,6 +88,7 @@ enum efx_filter_priority {
* the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
+ * @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
@@ -95,6 +96,7 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
+ EFX_FILTER_FLAG_VPORT_ID = 0x20,
};
/** enum efx_encap_type - types of encapsulation
@@ -127,6 +129,9 @@ enum efx_encap_type {
* MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
+ * @vport_id: Virtual port ID associated with RX queue, for adapter switching,
+ * if %EFX_FILTER_FLAG_VPORT_ID is set. This is an MCFW vport_id, or on
+ * EF100 an mport selector.
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
@@ -156,6 +161,7 @@ struct efx_filter_spec {
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
+ u32 vport_id;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}
+/**
+ * efx_filter_set_vport_id - override virtual port id relating to filter
+ * @spec: Specification to initialise
+ * @vport_id: firmware ID of the virtual port
+ */
+static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
+ u32 vport_id)
+{
+ spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
+ spec->vport_id = vport_id;
+}
+
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type)
{
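For illustration, a caller wanting RX traffic delivered via a different virtual port would build a filter spec as usual and then apply the new override before insertion. The surrounding function below is hypothetical; only efx_filter_set_vport_id() and the flag are from the hunk above:

	/* Illustrative usage: steer matching RX traffic to a specific MCFW vport. */
	static int example_insert_vport_filter(struct efx_nic *efx, u32 vport_id,
					       unsigned int rxq)
	{
		struct efx_filter_spec spec;

		efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq);
		efx_filter_set_vport_id(&spec, vport_id);	/* new flag + field */
		return efx_filter_insert_filter(efx, &spec, false);
	}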
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
new file mode 100644
index 000000000000..97627f5e3674
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mae.h"
+#include "mcdi.h"
+#include "mcdi_pcol_mae.h"
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ if (WARN_ON_ONCE(!id))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!label))
+ return -EINVAL;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+ MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+ MAE_MPORT_SELECTOR_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+ *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+ return 0;
+}
+
+int efx_mae_free_mport(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
+ return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
+ MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+/* Constructs an mport selector from an mport ID, because they're not the same */
+void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
+ MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+/* id is really only 24 bits wide */
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
+ return 0;
+}
+
+static bool efx_mae_asl_id(u32 id)
+{
+ return !!(id & BIT(31));
+}
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+ MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
+ MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
+ MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
+ if (act->deliver)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
+ act->dest_mport);
+ BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
+ /* We rely on the high bit of AS IDs always being clear.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ return -EIO;
+ }
+ return 0;
+}
+
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-sets exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
+ return -EIO;
+ return 0;
+}
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+ struct efx_tc_action_set *act;
+ size_t inlen, outlen, i = 0;
+ efx_dword_t *inbuf;
+ int rc;
+
+ list_for_each_entry(act, &acts->list, list)
+ i++;
+ if (i == 0)
+ return -EINVAL;
+ if (i == 1) {
+ /* Don't wrap an ASL around a single AS, just use the AS_ID
+ * directly. ASLs are a more limited resource.
+ */
+ act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
+ acts->fw_id = act->fw_id;
+ return 0;
+ }
+ if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
+ return -EOPNOTSUPP; /* Too many actions */
+ inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+ i = 0;
+ list_for_each_entry(act, &acts->list, list) {
+ MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
+ i, act->fw_id);
+ i++;
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out_free;
+ if (outlen < sizeof(outbuf)) {
+ rc = -EIO;
+ goto out_free;
+ }
+ acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+ /* We rely on the high bit of ASL IDs always being set.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
+ efx_mae_free_action_set_list(efx, acts);
+ rc = -EIO;
+ }
+out_free:
+ kfree(inbuf);
+ return rc;
+}
+
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ /* If this is just an AS_ID with no ASL wrapper, then there is
+ * nothing for us to free. (The AS will be freed later.)
+ */
+ if (efx_mae_asl_id(acts->fw_id)) {
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
+ acts->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-set-lists exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
+ return -EIO;
+ }
+ /* We're probably about to free @acts, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
+ return 0;
+}
+
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match)
+{
+ if (match->mask.ingress_port) {
+ if (~match->mask.ingress_port)
+ return -EOPNOTSUPP;
+ MCDI_STRUCT_SET_DWORD(match_crit,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
+ match->value.ingress_port);
+ }
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
+ match->mask.ingress_port);
+ return 0;
+}
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ MCDI_DECLARE_STRUCT_PTR(response);
+ size_t outlen;
+ int rc;
+
+ if (!id)
+ return -EINVAL;
+
+ match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+ response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+ if (efx_mae_asl_id(acts_id)) {
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+ MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+ } else {
+ /* We only had one AS, so we didn't wrap it in an ASL */
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+ rc = efx_mae_populate_match_criteria(match_crit, match);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+ return 0;
+}
+
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what rules exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
+ return -EIO;
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
new file mode 100644
index 000000000000..0369be4d8983
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF100_MAE_H
+#define EF100_MAE_H
+/* MCDI interface for the ef100 Match-Action Engine */
+
+#include "net_driver.h"
+#include "tc.h"
+#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
+int efx_mae_free_mport(struct efx_nic *efx, u32 id);
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
+void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
+
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id);
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+
+#endif /* EF100_MAE_H */
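Taken together, the MAE calls above follow an allocate-then-reference flow: allocate per-rule action sets, optionally wrap them in an action-set list, then insert the rule pointing at the resulting firmware ID. A hedged sketch of the expected call order; error handling is simplified and the efx_tc_* structures are assumed to be initialised by the caller:

	/* Illustrative only: offload one rule whose single action set is already
	 * populated (act->deliver / act->dest_mport set by the caller).
	 */
	static int example_mae_offload(struct efx_nic *efx,
				       const struct efx_tc_match *match,
				       struct efx_tc_action_set *act,
				       struct efx_tc_action_set_list *acts,
				       u32 *rule_id)
	{
		int rc;

		rc = efx_mae_alloc_action_set(efx, act);	/* fills act->fw_id */
		if (rc)
			return rc;
		list_add_tail(&act->list, &acts->list);
		rc = efx_mae_alloc_action_set_list(efx, acts);	/* fills acts->fw_id */
		if (rc)
			goto fail_as;

		/* Priority 0 is assumed here purely for illustration. */
		rc = efx_mae_insert_rule(efx, match, 0, acts->fw_id, rule_id);
		if (rc)
			goto fail_asl;
		return 0;

	fail_asl:
		efx_mae_free_action_set_list(efx, acts);
	fail_as:
		efx_mae_free_action_set(efx, act->fw_id);
		return rc;
	}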
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 50baf62b2cbc..af338208eae9 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -99,14 +99,12 @@ int efx_mcdi_init(struct efx_nic *efx)
*/
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
+ pci_err(efx->pci_dev, "Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
+ pci_err(efx->pci_dev, "Host already registered with MCPU\n");
if (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
@@ -1261,7 +1259,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
}
/* The MC is going down in to BIST mode. set the BIST flag to block
- * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
* (which doesn't actually execute a reset, it waits for the controlling
* function to reset it).
*/
@@ -1447,7 +1445,7 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
return;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
buf[0] = 0;
}
@@ -1471,8 +1469,9 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* care what firmware we get.
*/
if (rc == -EPERM) {
- netif_dbg(efx, probe, efx->net_dev,
- "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ pci_dbg(efx->pci_dev,
+ "%s with fw-variant setting failed EPERM, trying without it\n",
+ __func__);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
MC_CMD_FW_DONT_CARE);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
@@ -1514,7 +1513,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
return 0;
fail:
- netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -2130,6 +2129,52 @@ fail:
return rc;
}
+/* Failure to read a privilege mask is never fatal, because we can always
+ * carry on as though we didn't have the privilege we were interested in.
+ * So use efx_mcdi_rpc_quiet().
+ */
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
+{
+ MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+ MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+ size_t outlen;
+ u16 pf, vf;
+ int rc;
+
+ if (!efx || !mask)
+ return -EINVAL;
+
+ /* Get our function number */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
+ fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
+ &outlen);
+ if (rc != 0)
+ return rc;
+ if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
+ return -EIO;
+
+ pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
+ vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);
+
+ MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ *mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return 0;
+}
+
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 69c2924a147c..26bc69f76801 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define _MCDI_STRUCT_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
@@ -366,6 +370,7 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
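The new _MCDI_STRUCT_DWORD()/MCDI_STRUCT_SET_DWORD() variants take a field name without the MC_CMD_ prefix, so offsets are computed relative to an embedded MCDI structure rather than a whole command buffer. The contrast below simply restates how mae.c above uses the two forms side by side:

	/* Whole-command field: the macro prepends MC_CMD_, offset is relative
	 * to the start of inbuf.
	 */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);

	/* Struct-relative field: no MC_CMD_ prefix, offset is relative to
	 * match_crit, a pointer into inbuf at the embedded MATCH_CRITERIA.
	 */
	match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
	MCDI_STRUCT_SET_DWORD(match_crit,
			      MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
			      match->value.ingress_port);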
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 1523be77b9db..4ff6586116ee 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
+ if (flags & EFX_FILTER_FLAG_VPORT_ID)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
+ else
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
+ saved_spec->vport_id = spec->vport_id;
}
} else if (!replacing) {
kfree(saved_spec);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 06426aa9f2f3..c0d6558b9fd2 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
*/
bool mc_chaining;
bool vlan_filter;
+ /* Entries on the vlan_list are added/removed under filter_sem */
struct list_head vlan_list;
};
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index ff617b1b38d3..cd297e19cddc 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -165,138 +165,8 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
-/* Operation not permitted. */
-#define MC_CMD_ERR_EPERM 1
-/* Non-existent command target */
-#define MC_CMD_ERR_ENOENT 2
-/* assert() has killed the MC */
-#define MC_CMD_ERR_EINTR 4
-/* I/O failure */
-#define MC_CMD_ERR_EIO 5
-/* Already exists */
-#define MC_CMD_ERR_EEXIST 6
-/* Try again */
-#define MC_CMD_ERR_EAGAIN 11
-/* Out of memory */
-#define MC_CMD_ERR_ENOMEM 12
-/* Caller does not hold required locks */
-#define MC_CMD_ERR_EACCES 13
-/* Resource is currently unavailable (e.g. lock contention) */
-#define MC_CMD_ERR_EBUSY 16
-/* No such device */
-#define MC_CMD_ERR_ENODEV 19
-/* Invalid argument to target */
-#define MC_CMD_ERR_EINVAL 22
-/* Broken pipe */
-#define MC_CMD_ERR_EPIPE 32
-/* Read-only */
-#define MC_CMD_ERR_EROFS 30
-/* Out of range */
-#define MC_CMD_ERR_ERANGE 34
-/* Non-recursive resource is already acquired */
-#define MC_CMD_ERR_EDEADLK 35
-/* Operation not implemented */
-#define MC_CMD_ERR_ENOSYS 38
-/* Operation timed out */
-#define MC_CMD_ERR_ETIME 62
-/* Link has been severed */
-#define MC_CMD_ERR_ENOLINK 67
-/* Protocol error */
-#define MC_CMD_ERR_EPROTO 71
-/* Operation not supported */
-#define MC_CMD_ERR_ENOTSUP 95
-/* Address not available */
-#define MC_CMD_ERR_EADDRNOTAVAIL 99
-/* Not connected */
-#define MC_CMD_ERR_ENOTCONN 107
-/* Operation already in progress */
-#define MC_CMD_ERR_EALREADY 114
-
-/* Resource allocation failed. */
-#define MC_CMD_ERR_ALLOC_FAIL 0x1000
-/* V-adaptor not found. */
-#define MC_CMD_ERR_NO_VADAPTOR 0x1001
-/* EVB port not found. */
-#define MC_CMD_ERR_NO_EVB_PORT 0x1002
-/* V-switch not found. */
-#define MC_CMD_ERR_NO_VSWITCH 0x1003
-/* Too many VLAN tags. */
-#define MC_CMD_ERR_VLAN_LIMIT 0x1004
-/* Bad PCI function number. */
-#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
-/* Invalid VLAN mode. */
-#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
-/* Invalid v-switch type. */
-#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
-/* Invalid v-port type. */
-#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
-/* MAC address exists. */
-#define MC_CMD_ERR_MAC_EXIST 0x1009
-/* Slave core not present */
-#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
-/* The datapath is disabled. */
-#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
-/* The requesting client is not a function */
-#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
-/* The requested operation might require the
- command to be passed between MCs, and the
- transport doesn't support that. Should
- only ever been seen over the UART. */
-#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
-/* VLAN tag(s) exists */
-#define MC_CMD_ERR_VLAN_EXIST 0x100e
-/* No MAC address assigned to an EVB port */
-#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
-/* Notifies the driver that the request has been relayed
- * to an admin function for authorization. The driver should
- * wait for a PROXY_RESPONSE event and then resend its request.
- * This error code is followed by a 32-bit handle that
- * helps matching it with the respective PROXY_RESPONSE event. */
-#define MC_CMD_ERR_PROXY_PENDING 0x1010
-#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
-/* The request cannot be passed for authorization because
- * another request from the same function is currently being
- * authorized. The drvier should try again later. */
-#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
-/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
- * that has enabled proxying or BLOCK_INDEX points to a function that
- * doesn't await an authorization. */
-#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
-/* This code is currently only used internally in FW. Its meaning is that
- * an operation failed due to lack of SR-IOV privilege.
- * Normally it is translated to EPERM by send_cmd_err(),
- * but it may also be used to trigger some special mechanism
- * for handling such case, e.g. to relay the failed request
- * to a designated admin function for authorization. */
-#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
-/* Workaround 26807 could not be turned on/off because some functions
- * have already installed filters. See the comment at
- * MC_CMD_WORKAROUND_BUG26807.
- * May also returned for other operations such as sub-variant switching. */
-#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
- * doesn't exist on this NIC */
-#define MC_CMD_ERR_NO_CLOCK 0x1015
-/* Returned by MC_CMD_TESTASSERT if the action that should
- * have caused an assertion failed to do so. */
-#define MC_CMD_ERR_UNREACHABLE 0x1016
-/* This command needs to be processed in the background but there were no
- * resources to do so. Send it again after a command has completed. */
-#define MC_CMD_ERR_QUEUE_FULL 0x1017
-/* The operation could not be completed because the PCIe link has gone
- * away. This error code is never expected to be returned over the TLP
- * transport. */
-#define MC_CMD_ERR_NO_PCIE 0x1018
-/* The operation could not be completed because the datapath has gone
- * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
- * datapath absence may be temporary*/
-#define MC_CMD_ERR_NO_DATAPATH 0x1019
-/* The operation could not complete because some VIs are allocated */
-#define MC_CMD_ERR_VIS_PRESENT 0x101a
-/* The operation could not complete because some PIO buffers are allocated */
-#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-
#define MC_CMD_ERR_CODE_OFST 0
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
/* We define 8 "escape" commands to allow
for command number space extension */
@@ -365,10 +235,857 @@
*/
#define MC_CMD_ERR_ARG_OFST 4
-/* No space */
-#define MC_CMD_ERR_ENOSPC 28
-
-/* MCDI_EVENT structuredef */
+/* MC_CMD_ERR enum: Public MCDI error codes. Error codes that correspond to
+ * POSIX errnos should use the same numeric values that linux does. Error codes
+ * specific to Solarflare firmware should use values in the range 0x1000 -
+ * 0x10ff. The range 0x2000 - 0x20ff is reserved for private error codes (see
+ * MC_CMD_ERR_PRIV below).
+ */
+/* enum: Operation not permitted. */
+#define MC_CMD_ERR_EPERM 0x1
+/* enum: Non-existent command target */
+#define MC_CMD_ERR_ENOENT 0x2
+/* enum: assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 0x4
+/* enum: I/O failure */
+#define MC_CMD_ERR_EIO 0x5
+/* enum: Already exists */
+#define MC_CMD_ERR_EEXIST 0x6
+/* enum: Try again */
+#define MC_CMD_ERR_EAGAIN 0xb
+/* enum: Out of memory */
+#define MC_CMD_ERR_ENOMEM 0xc
+/* enum: Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 0xd
+/* enum: Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 0x10
+/* enum: No such device */
+#define MC_CMD_ERR_ENODEV 0x13
+/* enum: Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 0x16
+/* enum: No space */
+#define MC_CMD_ERR_ENOSPC 0x1c
+/* enum: Read-only */
+#define MC_CMD_ERR_EROFS 0x1e
+/* enum: Broken pipe */
+#define MC_CMD_ERR_EPIPE 0x20
+/* enum: Out of range */
+#define MC_CMD_ERR_ERANGE 0x22
+/* enum: Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 0x23
+/* enum: Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 0x26
+/* enum: Operation timed out */
+#define MC_CMD_ERR_ETIME 0x3e
+/* enum: Link has been severed */
+#define MC_CMD_ERR_ENOLINK 0x43
+/* enum: Protocol error */
+#define MC_CMD_ERR_EPROTO 0x47
+/* enum: Bad message */
+#define MC_CMD_ERR_EBADMSG 0x4a
+/* enum: Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 0x5f
+/* enum: Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 0x63
+/* enum: Not connected */
+#define MC_CMD_ERR_ENOTCONN 0x6b
+/* enum: Operation already in progress */
+#define MC_CMD_ERR_EALREADY 0x72
+/* enum: Stale handle. The handle references a resource that no longer exists.
+ */
+#define MC_CMD_ERR_ESTALE 0x74
+/* enum: Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* enum: V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* enum: EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* enum: V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* enum: Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* enum: Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* enum: Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* enum: Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* enum: Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* enum: MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* enum: Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* enum: The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* enum: The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* enum: The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. Should only ever be seen over
+ * the UART.
+ */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* enum: VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* enum: No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* enum: Notifies the driver that the request has been relayed to an admin
+ * function for authorization. The driver should wait for a PROXY_RESPONSE
+ * event and then resend its request. This error code is followed by a 32-bit
+ * handle that helps matching it with the respective PROXY_RESPONSE event.
+ */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
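For illustration only, a minimal sketch (not the sfc driver's real MCDI plumbing; the helper names are hypothetical and le32toh() is the glibc endian helper) of pulling the 32-bit handle out of such an error response so it can later be matched against the PROXY_RESPONSE event:

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Read the little-endian dword at a byte offset in an MCDI buffer. */
static uint32_t mcdi_dword_at(const uint8_t *buf, unsigned int ofst)
{
	uint32_t v;

	memcpy(&v, buf + ofst, sizeof(v));
	return le32toh(v);
}

/* Returns true if the command was deferred; *handle then holds the value to
 * match against the handle in the later PROXY_RESPONSE event.
 */
static bool mcdi_err_is_proxy_pending(const uint8_t *errbuf, uint32_t *handle)
{
	if (mcdi_dword_at(errbuf, MC_CMD_ERR_CODE_OFST) != MC_CMD_ERR_PROXY_PENDING)
		return false;
	*handle = mcdi_dword_at(errbuf, MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST);
	return true;
}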
+/* enum: The request cannot be passed for authorization because another request
+ * from the same function is currently being authorized. The driver should try
+ * again later.
+ */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* enum: Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that doesn't
+ * await an authorization.
+ */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* enum: This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege. Normally it is
+ * translated to EPERM by send_cmd_err(), but it may also be used to trigger
+ * some special mechanism for handling such case, e.g. to relay the failed
+ * request to a designated admin function for authorization.
+ */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* enum: Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. May also be returned for other operations such
+ * as sub-variant switching.
+ */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
+ * this NIC
+ */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* enum: Returned by MC_CMD_TESTASSERT if the action that should have caused an
+ * assertion failed to do so.
+ */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* enum: This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed.
+ */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* enum: The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport.
+ */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* enum: The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary.
+ */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* enum: The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* enum: The operation could not complete because some PIO buffers are
+ * allocated
+ */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+
+/* MC_CMD_FPGA_FLASH_INDEX enum */
+#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
+#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
+
+/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
+/* enum: Legacy mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
+/* enum: Switchdev mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
+/* enum: Bootstrap mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
+/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
+
+/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
+ * interfaces. There is a need to refer to interfaces explicitly from drivers
+ * (for example, a management driver on one interface administering a function
+ * on another interface). This enumeration provides stable identifiers to all
+ * interfaces present on a product. Product documentation will specify which
+ * interfaces exist and their associated identifier. In general, drivers
+ * should not assign special meanings to specific values. Instead, behaviour
+ * should be determined by NIC configuration, which will identify interfaces
+ * where appropriate.
+ */
+/* enum: Primary host interfaces. Typically (i.e. for all known SFC products)
+ * the interface exposed on the edge connector (or form factor equivalent).
+ */
+#define PCIE_INTERFACE_HOST_PRIMARY 0x0
+/* enum: Riverhead and keystone products have a second PCIe interface to which
+ * an on-NIC ARM module is expected to be connected.
+ */
+#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: For MCDI commands issued over a PCIe interface, this value is
+ * translated into the interface over which the command was issued. Not
+ * meaningful for other MCDI transports.
+ */
+#define PCIE_INTERFACE_CALLER 0xffffffff
+
+/* MC_CLIENT_ID_SPECIFIER enum */
+/* enum: Equivalent to the caller's client ID */
+#define MC_CMD_CLIENT_ID_SELF 0xffffffff
+
+/* MAE_FIELD_SUPPORT_STATUS enum */
+/* enum: The NIC does not support this field. The driver must ensure that any
+ * mask associated with this field in a match rule is zeroed. The NIC may
+ * either reject requests with an invalid mask for such a field, or may assume
+ * that the mask is zero. (This category only exists to describe behaviour for
+ * fields that a newer driver might know about but that older firmware does
+ * not. It is recommended that firmware report MAE_FIELD_SUPPORTED_MATCH_NEVER
+ * for all match fields defined at the time of its compilation. If a driver
+ * sees a field support status value that it does not recognise, it must treat
+ * that field as though the field was reported as
+ * MAE_FIELD_SUPPORTED_MATCH_NEVER, and must never set a non-zero mask value
+ * for this field.)
+ */
+#define MAE_FIELD_UNSUPPORTED 0x0
+/* enum: The NIC supports this field, but cannot use it in a match rule. The
+ * driver must ensure that any mask for such a field in a match rule is zeroed.
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_NEVER 0x1
+/* enum: The NIC supports this field, and must use it in all match rules. The
+ * driver must ensure that any mask for such a field is all ones. The NIC will
+ * reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_ALWAYS 0x2
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or all ones. The NIC will reject requests with an invalid mask for such a
+ * field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_OPTIONAL 0x3
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or a consecutive set of ones followed by all zeroes (starting from MSB).
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_PREFIX 0x4
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver may provide an arbitrary mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_MASK 0x5
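As a rough illustration of the constraints above, a minimal sketch (hypothetical helper; a 32-bit field is assumed for brevity, wider fields follow the same rules) of validating a match-rule mask against the reported support status:

#include <stdbool.h>
#include <stdint.h>

static bool mae_mask_valid(unsigned int support, uint32_t mask)
{
	uint32_t inv = ~mask;

	switch (support) {
	case MAE_FIELD_SUPPORTED_MATCH_MASK:
		return true;			/* arbitrary mask allowed */
	case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
		/* All zeroes, or ones from the MSB downwards: ~mask must be a
		 * contiguous run of low-order ones (or zero).
		 */
		return (inv & (inv + 1)) == 0;
	case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
		return mask == 0 || mask == ~0u;
	case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
		return mask == ~0u;
	case MAE_FIELD_SUPPORTED_MATCH_NEVER:
	case MAE_FIELD_UNSUPPORTED:
	default:	/* unrecognised status: treat as MATCH_NEVER */
		return mask == 0;
	}
}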
+
+/* MAE_CT_VNI_MODE enum: Controls the layout of the VNI input to the conntrack
+ * lookup. (Values are not arbitrary - constrained by table access ABI.)
+ */
+/* enum: The VNI input to the conntrack lookup will be zero. */
+#define MAE_CT_VNI_MODE_ZERO 0x0
+/* enum: The VNI input to the conntrack lookup will be the VNI (VXLAN/Geneve)
+ * or VSID (NVGRE) field from the packet.
+ */
+#define MAE_CT_VNI_MODE_VNI 0x1
+/* enum: The VNI input to the conntrack lookup will be the VLAN ID from the
+ * outermost VLAN tag (in bottom 12 bits; top 12 bits zero).
+ */
+#define MAE_CT_VNI_MODE_1VLAN 0x2
+/* enum: The VNI input to the conntrack lookup will be the VLAN IDs from both
+ * VLAN tags (outermost in bottom 12 bits, innermost in top 12 bits).
+ */
+#define MAE_CT_VNI_MODE_2VLAN 0x3
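A minimal sketch (hypothetical helper; the datapath does this internally) of how the 24-bit conntrack VNI input is formed for each mode, per the descriptions above:

#include <stdint.h>

static uint32_t mae_ct_vni_input(unsigned int mode, uint32_t vni,
				 uint16_t outer_vid, uint16_t inner_vid)
{
	switch (mode) {
	case MAE_CT_VNI_MODE_VNI:
		return vni & 0xffffff;		/* VNI/VSID from the packet */
	case MAE_CT_VNI_MODE_1VLAN:
		return outer_vid & 0xfff;	/* bottom 12 bits; top 12 zero */
	case MAE_CT_VNI_MODE_2VLAN:
		return (outer_vid & 0xfff) |	/* outermost VLAN ID in bottom 12 bits */
		       ((uint32_t)(inner_vid & 0xfff) << 12); /* innermost in top 12 */
	case MAE_CT_VNI_MODE_ZERO:
	default:
		return 0;
	}
}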
+
+/* MAE_FIELD enum: NB: this enum shares namespace with the support status enum.
+ */
+/* enum: Source mport upon entering the MAE. */
+#define MAE_FIELD_INGRESS_PORT 0x0
+#define MAE_FIELD_MARK 0x1 /* enum */
+/* enum: Table ID used in action rule. Initially zero, can be changed in action
+ * rule response.
+ */
+#define MAE_FIELD_RECIRC_ID 0x2
+#define MAE_FIELD_IS_IP_FRAG 0x3 /* enum */
+#define MAE_FIELD_DO_CT 0x4 /* enum */
+#define MAE_FIELD_CT_HIT 0x5 /* enum */
+/* enum: Undefined unless CT_HIT=1. */
+#define MAE_FIELD_CT_MARK 0x6
+/* enum: Undefined unless DO_CT=1. */
+#define MAE_FIELD_CT_DOMAIN 0x7
+/* enum: Undefined unless CT_HIT=1. */
+#define MAE_FIELD_CT_PRIVATE_FLAGS 0x8
+/* enum: 1 if the packet ingressed the NIC from one of the MACs, else 0. */
+#define MAE_FIELD_IS_FROM_NETWORK 0x9
+/* enum: 1 if the packet has 1 or more VLAN tags, else 0. */
+#define MAE_FIELD_HAS_OVLAN 0xa
+/* enum: 1 if the packet has 2 or more VLAN tags, else 0. */
+#define MAE_FIELD_HAS_IVLAN 0xb
+/* enum: 1 if the outer packet has 1 or more VLAN tags, else 0; only present
+ * when encap
+ */
+#define MAE_FIELD_ENC_HAS_OVLAN 0xc
+/* enum: 1 if the outer packet has 2 or more VLAN tags, else 0; only present
+ * when encap
+ */
+#define MAE_FIELD_ENC_HAS_IVLAN 0xd
+/* enum: Packet is IP fragment */
+#define MAE_FIELD_ENC_IP_FRAG 0xe
+#define MAE_FIELD_ETHER_TYPE 0x21 /* enum */
+#define MAE_FIELD_VLAN0_TCI 0x22 /* enum */
+#define MAE_FIELD_VLAN0_PROTO 0x23 /* enum */
+#define MAE_FIELD_VLAN1_TCI 0x24 /* enum */
+#define MAE_FIELD_VLAN1_PROTO 0x25 /* enum */
+/* enum: Inner when encap */
+#define MAE_FIELD_ETH_SADDR 0x28
+/* enum: Inner when encap */
+#define MAE_FIELD_ETH_DADDR 0x29
+/* enum: Inner when encap. NB: IPv4 and IPv6 fields are mutually exclusive. */
+#define MAE_FIELD_SRC_IP4 0x2a
+/* enum: Inner when encap */
+#define MAE_FIELD_SRC_IP6 0x2b
+/* enum: Inner when encap */
+#define MAE_FIELD_DST_IP4 0x2c
+/* enum: Inner when encap */
+#define MAE_FIELD_DST_IP6 0x2d
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_PROTO 0x2e
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_TOS 0x2f
+/* enum: Inner when encap */
+#define MAE_FIELD_IP_TTL 0x30
+/* enum: Inner when encap TODO: how is this defined? The raw flags +
+ * frag_offset from the packet, or some derived value more amenable to ternary
+ * matching? TODO: there was a proposal for driver-allocation fields. The
+ * driver would provide some instruction for how to extract given field values,
+ * and would be given a field id in return. It could then use that field id in
+ * its matches. This feels like it would be extremely hard to implement in
+ * hardware, but I mention it for completeness.
+ */
+#define MAE_FIELD_IP_FLAGS 0x31
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_SPORT 0x32
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_DPORT 0x33
+/* enum: Inner when encap */
+#define MAE_FIELD_TCP_FLAGS 0x34
+/* enum: TCP packet with any of SYN, FIN or RST flag set */
+#define MAE_FIELD_TCP_SYN_FIN_RST 0x35
+/* enum: Packet is IP fragment with fragment offset 0 */
+#define MAE_FIELD_IP_FIRST_FRAG 0x36
+/* enum: The type of encapsulation used for this packet. Value as per
+ * ENCAP_TYPE_*.
+ */
+#define MAE_FIELD_ENCAP_TYPE 0x3f
+/* enum: The ID of the outer rule that marked this packet as encapsulated.
+ * Useful for implicitly matching on outer fields.
+ */
+#define MAE_FIELD_OUTER_RULE_ID 0x40
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETHER_TYPE 0x41
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_TCI 0x42
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_PROTO 0x43
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_TCI 0x44
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_PROTO 0x45
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_SADDR 0x48
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_DADDR 0x49
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP4 0x4a
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP6 0x4b
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP4 0x4c
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP6 0x4d
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_PROTO 0x4e
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TOS 0x4f
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TTL 0x50
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_FLAGS 0x51
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_SPORT 0x52
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_DPORT 0x53
+/* enum: VNI (when VXLAN or GENEVE) VSID (when NVGRE) Bottom 24 bits of Key
+ * (when L2GRE) Outer; only present when encap
+ */
+#define MAE_FIELD_ENC_VNET_ID 0x54
+
+/* MAE_MCDI_ENCAP_TYPE enum: Encapsulation type. Defines how the payload will
+ * be parsed to an inner frame. Other values are reserved. Unknown values
+ * should be treated same as NONE. (Values are not arbitrary - constrained by
+ * table access ABI.)
+ */
+#define MAE_MCDI_ENCAP_TYPE_NONE 0x0 /* enum */
+/* enum: Don't assume enum aligns with support bitmask... */
+#define MAE_MCDI_ENCAP_TYPE_VXLAN 0x1
+#define MAE_MCDI_ENCAP_TYPE_NVGRE 0x2 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_GENEVE 0x3 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_L2GRE 0x4 /* enum */
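Since unknown values are to be treated the same as NONE, a consumer of this field might normalise it as in this sketch (hypothetical helper name):

static unsigned int mae_encap_type_normalise(unsigned int type)
{
	switch (type) {
	case MAE_MCDI_ENCAP_TYPE_VXLAN:
	case MAE_MCDI_ENCAP_TYPE_NVGRE:
	case MAE_MCDI_ENCAP_TYPE_GENEVE:
	case MAE_MCDI_ENCAP_TYPE_L2GRE:
		return type;
	default:
		return MAE_MCDI_ENCAP_TYPE_NONE;	/* unknown => treat as NONE */
	}
}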
+
+/* MAE_MPORT_END enum: Selects which end of the logical link identified by an
+ * MPORT_SELECTOR is targeted by an operation.
+ */
+/* enum: Selects the port on the MAE virtual switch */
+#define MAE_MPORT_END_MAE 0x1
+/* enum: Selects the virtual NIC plugged into the MAE switch */
+#define MAE_MPORT_END_VNIC 0x2
+
+/* MAE_COUNTER_TYPE enum: The datapath maintains several sets of counters, each
+ * being associated with a different table. Note that the same counter ID may
+ * be allocated by different counter blocks, so e.g. AR counter 42 is different
+ * from CT counter 42. Generation counts are also type-specific. This value is
+ * also present in the header of streaming counter packets, in the IDENTIFIER
+ * field (see packetiser packet format definitions).
+ */
+/* enum: Action Rule counters - can be referenced in AR response. */
+#define MAE_COUNTER_TYPE_AR 0x0
+/* enum: Conntrack counters - can be referenced in CT response. */
+#define MAE_COUNTER_TYPE_CT 0x1
+/* enum: Outer Rule counters - can be referenced in OR response. */
+#define MAE_COUNTER_TYPE_OR 0x2
+
+/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
+ * structured with bits [31:24] reserved (0), [23:16] indicating which major
+ * block the table belongs to (0=VNIC TX, none currently; 1=MAE; 2=VNIC RX),
+ * [15:8] a unique ID within the block, and [7:0] reserved for future
+ * variations of the same table. (All of the tables currently defined within
+ * the streaming engines are listed here, but this does not imply that they are
+ * all supported - MC_CMD_TABLE_LIST returns the list of actually supported
+ * tables.)
+ */
+/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_TABLE 0x10000
+/* enum: Outer_Rule_No_CT_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_NO_CT_TABLE 0x10100
+/* enum: Mgmt_Filter_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGMT_FILTER_TABLE 0x10200
+/* enum: Conntrack_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_CONNTRACK_TABLE 0x10300
+/* enum: Action_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ACTION_RULE_TABLE 0x10400
+/* enum: Mgroup_Default_Action_Set_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGROUP_DEFAULT_ACTION_SET_TABLE 0x10500
+/* enum: Encap_Hdr_Part1_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART1_TABLE 0x10600
+/* enum: Encap_Hdr_Part2_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART2_TABLE 0x10700
+/* enum: Replace_Src_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_SRC_MAC_TABLE 0x10800
+/* enum: Replace_Dst_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_DST_MAC_TABLE 0x10900
+/* enum: Dst_Mport_VC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_VC_TABLE 0x10a00
+/* enum: LACP_LAG_Config_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_LAG_CONFIG_TABLE 0x10b00
+/* enum: LACP_Balance_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_BALANCE_TABLE 0x10c00
+/* enum: Dst_Mport_Host_Chan_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_HOST_CHAN_TABLE 0x10d00
+/* enum: VNIC_Rx_Encap_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_VNIC_RX_ENCAP_TABLE 0x20000
+/* enum: Steering_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_STEERING_TABLE 0x20100
+/* enum: RSS_Context_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
+/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
+#define TABLE_ID_INDIRECTION_TABLE 0x20300
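A small sketch (hypothetical struct and helper) splitting a TABLE_ID into the bit ranges described above:

#include <stdint.h>

struct table_id_parts {
	uint8_t block;		/* bits [23:16]: 0=VNIC TX, 1=MAE, 2=VNIC RX */
	uint8_t table;		/* bits [15:8]: unique ID within the block */
	uint8_t variation;	/* bits [7:0]: reserved for future variants */
};

static struct table_id_parts table_id_decompose(uint32_t id)
{
	struct table_id_parts p = {
		.block     = (id >> 16) & 0xff,
		.table     = (id >> 8) & 0xff,
		.variation = id & 0xff,
	};
	return p;
}

/* e.g. TABLE_ID_CONNTRACK_TABLE (0x10300) => block 1 (MAE), table 3. */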
+
+/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
+ * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
+ * (ether_type_msb & 0x3);
+ */
+#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
+#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
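The compression formula can be checked directly against the enum values above; a short self-test sketch (hypothetical function names), where ether_type_msb is the most significant byte of the TPID (e.g. 0x81 for 0x8100):

#include <assert.h>
#include <stdint.h>

static uint8_t table_compress_tpid(uint8_t ether_type_msb)
{
	return (((ether_type_msb >> 2) & 0x4) ^ 0x4) | (ether_type_msb & 0x3);
}

static void table_compress_tpid_selftest(void)
{
	assert(table_compress_tpid(0x81) == TABLE_COMPRESSED_VLAN_TPID_8100);
	assert(table_compress_tpid(0x88) == TABLE_COMPRESSED_VLAN_TPID_88A8);
	assert(table_compress_tpid(0x91) == TABLE_COMPRESSED_VLAN_TPID_9100);
	assert(table_compress_tpid(0x92) == TABLE_COMPRESSED_VLAN_TPID_9200);
	assert(table_compress_tpid(0x93) == TABLE_COMPRESSED_VLAN_TPID_9300);
}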
+
+/* TABLE_NAT_DIR enum: NAT direction. */
+#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
+#define TABLE_NAT_DIR_DEST 0x1 /* enum */
+
+/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
+ * is constructed as a concatenation (indicated here by "++") of packet header
+ * fields.
+ */
+/* enum: IP src addr ++ IP dst addr */
+#define TABLE_RSS_KEY_MODE_SA_DA 0x0
+/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
+#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
+/* enum: IP src addr */
+#define TABLE_RSS_KEY_MODE_SA 0x2
+/* enum: IP dst addr */
+#define TABLE_RSS_KEY_MODE_DA 0x3
+/* enum: IP src addr ++ TCP/UDP src port */
+#define TABLE_RSS_KEY_MODE_SA_SP 0x4
+/* enum: IP dest addr ++ TCP dest port */
+#define TABLE_RSS_KEY_MODE_DA_DP 0x5
+/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
+#define TABLE_RSS_KEY_MODE_NONE 0x7
+
+/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
+/* enum: RSS uses Indirection_Table lookup. */
+#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
+/* enum: RSS uses even spreading calculation. */
+#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+
+/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
+ * loosely grouped together into blocks with gaps for expansion, but the values
+ * are arbitrary. Field IDs are not specific to particular tables, and in some
+ * cases this sharing means that they are not used with the exact names of the
+ * corresponding table definitions in SF-123102-TC; however, the mapping should
+ * still be clear. The intent is that a list of fields, with their associated
+ * bit widths and semantics version code, unambiguously defines the semantics
+ * of the fields in a key or response. (Again, this list includes all of the
+ * fields currently defined within the streaming engines, but only a subset may
+ * actually be used by the supported list of tables.)
+ */
+/* enum: May appear multiple times within a key or response, and indicates that
+ * the field is unused and should be set to 0 (or masked out if permitted by
+ * the MASK_VALUE for this field).
+ */
+#define TABLE_FIELD_ID_UNUSED 0x0
+/* enum: Source m-port (a full m-port label). */
+#define TABLE_FIELD_ID_SRC_MPORT 0x1
+/* enum: Destination m-port (a full m-port label). */
+#define TABLE_FIELD_ID_DST_MPORT 0x2
+/* enum: Source m-group ID. */
+#define TABLE_FIELD_ID_SRC_MGROUP_ID 0x3
+/* enum: Physical network port ID (or m-port ID; same thing, for physical
+ * network ports).
+ */
+#define TABLE_FIELD_ID_NETWORK_PORT_ID 0x4
+/* enum: True if packet arrived via network port, false if it arrived via host.
+ */
+#define TABLE_FIELD_ID_IS_FROM_NETWORK 0x5
+/* enum: Full virtual channel from capsule header. */
+#define TABLE_FIELD_ID_CH_VC 0x6
+/* enum: Low bits of virtual channel from capsule header. */
+#define TABLE_FIELD_ID_CH_VC_LOW 0x7
+/* enum: User mark value in metadata and packet prefix. */
+#define TABLE_FIELD_ID_USER_MARK 0x8
+/* enum: User flag value in metadata and packet prefix. */
+#define TABLE_FIELD_ID_USER_FLAG 0x9
+/* enum: Counter ID associated with a response. All-bits-1 is a null value to
+ * suppress counting.
+ */
+#define TABLE_FIELD_ID_COUNTER_ID 0xa
+/* enum: Discriminator which may be set by plugins in some lookup keys; this
+ * allows plugins to make a reinterpretation of packet fields in these keys
+ * without clashing with the normal interpretation.
+ */
+#define TABLE_FIELD_ID_DISCRIM 0xb
+/* enum: Destination MAC address. The mapping from bytes in a frame to the
+ * 48-bit value for this field is in network order, i.e. a MAC address of
+ * AA:BB:CC:DD:EE:FF becomes a 48-bit value of 0xAABBCCDDEEFF.
+ */
+#define TABLE_FIELD_ID_DST_MAC 0x14
+/* enum: Source MAC address (see notes for DST_MAC). */
+#define TABLE_FIELD_ID_SRC_MAC 0x15
+/* enum: Outer VLAN tag TPID, compressed to an enumeration. */
+#define TABLE_FIELD_ID_OVLAN_TPID_COMPRESSED 0x16
+/* enum: Full outer VLAN tag TCI (16 bits). */
+#define TABLE_FIELD_ID_OVLAN 0x17
+/* enum: Outer VLAN ID (least significant 12 bits of full 16-bit TCI) only. */
+#define TABLE_FIELD_ID_OVLAN_VID 0x18
+/* enum: Inner VLAN tag TPID, compressed to an enumeration. */
+#define TABLE_FIELD_ID_IVLAN_TPID_COMPRESSED 0x19
+/* enum: Full inner VLAN tag TCI (16 bits). */
+#define TABLE_FIELD_ID_IVLAN 0x1a
+/* enum: Inner VLAN ID (least significant 12 bits of full 16-bit TCI) only. */
+#define TABLE_FIELD_ID_IVLAN_VID 0x1b
+/* enum: Ethertype. */
+#define TABLE_FIELD_ID_ETHER_TYPE 0x1c
+/* enum: Source IP address, either IPv4 or IPv6. The mapping from bytes in a
+ * frame to the 128-bit value for this field is in network order, with IPv4
+ * addresses assumed to have 12 bytes of trailing zeroes. i.e. the IPv6 address
+ * [2345::6789:ABCD] is 0x2345000000000000000000006789ABCD; the IPv4 address
+ * 192.168.1.2 is 0xC0A80102000000000000000000000000.
+ */
+#define TABLE_FIELD_ID_SRC_IP 0x1d
+/* enum: Destination IP address (see notes for SRC_IP). */
+#define TABLE_FIELD_ID_DST_IP 0x1e
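A sketch (hypothetical helpers) of the byte-to-value mappings described for the MAC and IP address fields above:

#include <stdint.h>
#include <string.h>

/* AA:BB:CC:DD:EE:FF -> 0xAABBCCDDEEFF (network order) */
static uint64_t table_mac_to_u48(const uint8_t mac[6])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 6; i++)
		v = (v << 8) | mac[i];
	return v;
}

/* IPv4 a.b.c.d occupies the most significant four bytes of the 128-bit value;
 * the remaining 12 bytes are zero, so 192.168.1.2 begins 0xC0A80102. out[]
 * holds the value most-significant byte first.
 */
static void table_ipv4_to_u128(const uint8_t ip[4], uint8_t out[16])
{
	memset(out, 0, 16);
	memcpy(out, ip, 4);
}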
+/* enum: IPv4 Type-of-Service or IPv6 Traffic Class field. */
+#define TABLE_FIELD_ID_IP_TOS 0x1f
+/* enum: IP Protocol. */
+#define TABLE_FIELD_ID_IP_PROTO 0x20
+/* enum: Layer 4 source port. */
+#define TABLE_FIELD_ID_SRC_PORT 0x21
+/* enum: Layer 4 destination port. */
+#define TABLE_FIELD_ID_DST_PORT 0x22
+/* enum: TCP flags. */
+#define TABLE_FIELD_ID_TCP_FLAGS 0x23
+/* enum: Virtual Network Identifier (VXLAN) or Virtual Session ID (NVGRE). */
+#define TABLE_FIELD_ID_VNI 0x24
+/* enum: True if packet has any tunnel encapsulation header. */
+#define TABLE_FIELD_ID_HAS_ENCAP 0x32
+/* enum: True if encap header has an outer VLAN tag. */
+#define TABLE_FIELD_ID_HAS_ENC_OVLAN 0x33
+/* enum: True if encap header has an inner VLAN tag. */
+#define TABLE_FIELD_ID_HAS_ENC_IVLAN 0x34
+/* enum: True if encap header is some sort of IP. */
+#define TABLE_FIELD_ID_HAS_ENC_IP 0x35
+/* enum: True if encap header is specifically IPv4. */
+#define TABLE_FIELD_ID_HAS_ENC_IP4 0x36
+/* enum: True if encap header is UDP. */
+#define TABLE_FIELD_ID_HAS_ENC_UDP 0x37
+/* enum: True if only/inner frame has an outer VLAN tag. */
+#define TABLE_FIELD_ID_HAS_OVLAN 0x38
+/* enum: True if only/inner frame has an inner VLAN tag. */
+#define TABLE_FIELD_ID_HAS_IVLAN 0x39
+/* enum: True if only/inner frame is some sort of IP. */
+#define TABLE_FIELD_ID_HAS_IP 0x3a
+/* enum: True if only/inner frame has a recognised L4 IP protocol (TCP or UDP).
+ */
+#define TABLE_FIELD_ID_HAS_L4 0x3b
+/* enum: True if only/inner frame is an IP fragment. */
+#define TABLE_FIELD_ID_IP_FRAG 0x3c
+/* enum: True if only/inner frame is the first IP fragment (fragment offset 0).
+ */
+#define TABLE_FIELD_ID_IP_FIRST_FRAG 0x3d
+/* enum: True if only/inner frame has an IP Time-To-Live of <= 1. (Note: the
+ * implementation calls this "ip_ttl_is_one" but does in fact match packets
+ * with TTL=0 - which we shouldn't be seeing! - as well.)
+ */
+#define TABLE_FIELD_ID_IP_TTL_LE_ONE 0x3e
+/* enum: True if only/inner frame has any of TCP SYN, FIN or RST flags set. */
+#define TABLE_FIELD_ID_TCP_INTERESTING_FLAGS 0x3f
+/* enum: Plugin channel selection. */
+#define TABLE_FIELD_ID_RDP_PL_CHAN 0x50
+/* enum: Enable update of CH_ROUTE_RDP_C_PL route bit. */
+#define TABLE_FIELD_ID_RDP_C_PL_EN 0x51
+/* enum: New value of CH_ROUTE_RDP_C_PL route bit. */
+#define TABLE_FIELD_ID_RDP_C_PL 0x52
+/* enum: Enable update of CH_ROUTE_RDP_D_PL route bit. */
+#define TABLE_FIELD_ID_RDP_D_PL_EN 0x53
+/* enum: New value of CH_ROUTE_RDP_D_PL route bit. */
+#define TABLE_FIELD_ID_RDP_D_PL 0x54
+/* enum: Enable update of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */
+#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN_EN 0x55
+/* enum: New value of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */
+#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN 0x56
+/* enum: Recirculation ID for lookup sequences with two action rule lookups. */
+#define TABLE_FIELD_ID_RECIRC_ID 0x64
+/* enum: Domain ID passed to conntrack and action rule lookups. */
+#define TABLE_FIELD_ID_DOMAIN 0x65
+/* enum: Construction mode for encap_tunnel_id - see MAE_CT_VNI_MODE enum. */
+#define TABLE_FIELD_ID_CT_VNI_MODE 0x66
+/* enum: True to inhibit conntrack lookup if TCP SYN, FIN or RST flag is set.
+ */
+#define TABLE_FIELD_ID_CT_TCP_FLAGS_INHIBIT 0x67
+/* enum: True to do conntrack lookups for IPv4 TCP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP4_TCP 0x68
+/* enum: True to do conntrack lookups for IPv4 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP4_UDP 0x69
+/* enum: True to do conntrack lookups for IPv6 TCP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_TCP 0x6a
+/* enum: True to do conntrack lookups for IPv6 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_UDP 0x6b
+/* enum: Outer rule identifier. */
+#define TABLE_FIELD_ID_OUTER_RULE_ID 0x6c
+/* enum: Encapsulation type - see MAE_MCDI_ENCAP_TYPE enum. */
+#define TABLE_FIELD_ID_ENCAP_TYPE 0x6d
+/* enum: Encap tunnel ID for conntrack lookups from VNI, VLAN tag(s), or 0,
+ * depending on CT_VNI_MODE.
+ */
+#define TABLE_FIELD_ID_ENCAP_TUNNEL_ID 0x78
+/* enum: A conntrack entry identifier, passed to plugins. */
+#define TABLE_FIELD_ID_CT_ENTRY_ID 0x79
+/* enum: Either source or destination NAT replacement port. */
+#define TABLE_FIELD_ID_NAT_PORT 0x7a
+/* enum: Either source or destination NAT replacement IPv4 address. Note that
+ * this is specifically an IPv4 address (IPv6 is not supported for NAT), with
+ * bytes mapped to a 32-bit value in network order, i.e. the IPv4 address
+ * 192.168.1.2 is the value 0xC0A80102.
+ */
+#define TABLE_FIELD_ID_NAT_IP 0x7b
+/* enum: NAT direction: 0=>source, 1=>destination. */
+#define TABLE_FIELD_ID_NAT_DIR 0x7c
+/* enum: Conntrack mark value, passed to action rule lookup. Note that this is
+ * not related to the "user mark" in the metadata / packet prefix.
+ */
+#define TABLE_FIELD_ID_CT_MARK 0x7d
+/* enum: Private flags for conntrack, passed to action rule lookup. */
+#define TABLE_FIELD_ID_CT_PRIV_FLAGS 0x7e
+/* enum: True if the conntrack lookup resulted in a hit. */
+#define TABLE_FIELD_ID_CT_HIT 0x7f
+/* enum: True to suppress delivery when source and destination m-ports match.
+ */
+#define TABLE_FIELD_ID_SUPPRESS_SELF_DELIVERY 0x8c
+/* enum: True to perform tunnel decapsulation. */
+#define TABLE_FIELD_ID_DO_DECAP 0x8d
+/* enum: True to copy outer frame DSCP to inner on decap. */
+#define TABLE_FIELD_ID_DECAP_DSCP_COPY 0x8e
+/* enum: True to map outer frame ECN to inner on decap, by RFC 6040 rules. */
+#define TABLE_FIELD_ID_DECAP_ECN_RFC6040 0x8f
+/* enum: True to replace DSCP field. */
+#define TABLE_FIELD_ID_DO_REPLACE_DSCP 0x90
+/* enum: True to replace ECN field. */
+#define TABLE_FIELD_ID_DO_REPLACE_ECN 0x91
+/* enum: True to decrement IP Time-To-Live. */
+#define TABLE_FIELD_ID_DO_DECR_IP_TTL 0x92
+/* enum: True to replace source MAC address. */
+#define TABLE_FIELD_ID_DO_SRC_MAC 0x93
+/* enum: True to replace destination MAC address. */
+#define TABLE_FIELD_ID_DO_DST_MAC 0x94
+/* enum: Number of VLAN tags to pop. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_POP 0x95
+/* enum: Number of VLAN tags to push. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_PUSH 0x96
+/* enum: True to count this packet. */
+#define TABLE_FIELD_ID_DO_COUNT 0x97
+/* enum: True to perform tunnel encapsulation. */
+#define TABLE_FIELD_ID_DO_ENCAP 0x98
+/* enum: True to copy inner frame DSCP to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_DSCP_COPY 0x99
+/* enum: True to copy inner frame ECN to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_ECN_COPY 0x9a
+/* enum: True to deliver the packet (otherwise it is dropped). */
+#define TABLE_FIELD_ID_DO_DELIVER 0x9b
+/* enum: True to set the user flag in the metadata. */
+#define TABLE_FIELD_ID_DO_FLAG 0x9c
+/* enum: True to update the user mark in the metadata. */
+#define TABLE_FIELD_ID_DO_MARK 0x9d
+/* enum: True to override the capsule virtual channel for network deliveries.
+ */
+#define TABLE_FIELD_ID_DO_SET_NET_CHAN 0x9e
+/* enum: True to override the reported source m-port for host deliveries. */
+#define TABLE_FIELD_ID_DO_SET_SRC_MPORT 0x9f
+/* enum: Encap header ID for DO_ENCAP, indexing Encap_Hdr_Part1/2_Table. */
+#define TABLE_FIELD_ID_ENCAP_HDR_ID 0xaa
+/* enum: New DSCP value for DO_REPLACE_DSCP. */
+#define TABLE_FIELD_ID_DSCP_VALUE 0xab
+/* enum: If DO_REPLACE_ECN is set, the new value for the ECN field. If
+ * DO_REPLACE_ECN is not set, ECN_CONTROL[0] and ECN_CONTROL[1] are set to
+ * request remapping of ECT0 and ECT1 ECN codepoints respectively to CE.
+ */
+#define TABLE_FIELD_ID_ECN_CONTROL 0xac
+/* enum: Source MAC ID for DO_SRC_MAC, indexing Replace_Src_MAC_Table. */
+#define TABLE_FIELD_ID_SRC_MAC_ID 0xad
+/* enum: Destination MAC ID for DO_DST_MAC, indexing Replace_Dst_MAC_Table. */
+#define TABLE_FIELD_ID_DST_MAC_ID 0xae
+/* enum: Parameter for either DO_SET_NET_CHAN (only bottom 6 bits used in this
+ * case) or DO_SET_SRC_MPORT.
+ */
+#define TABLE_FIELD_ID_REPORTED_SRC_MPORT_OR_NET_CHAN 0xaf
+/* enum: 64-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK64 0xb4
+/* enum: 32-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK32 0xb5
+/* enum: 16-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK16 0xb6
+/* enum: 8-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK8 0xb7
+/* enum: 4-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK4 0xb8
+/* enum: 2-byte chunk of added encapsulation header. */
+#define TABLE_FIELD_ID_CHUNK2 0xb9
+/* enum: Added encapsulation header length in words. */
+#define TABLE_FIELD_ID_HDR_LEN_W 0xba
+/* enum: Static value for layer 2/3 LACP hash of the encapsulation header. */
+#define TABLE_FIELD_ID_ENC_LACP_HASH_L23 0xbb
+/* enum: Static value for layer 4 LACP hash of the encapsulation header. */
+#define TABLE_FIELD_ID_ENC_LACP_HASH_L4 0xbc
+/* enum: True to use the static ENC_LACP_HASH values for the encap header
+ * instead of the calculated values for the inner frame when delivering a newly
+ * encapsulated packet to a LAG m-port.
+ */
+#define TABLE_FIELD_ID_USE_ENC_LACP_HASHES 0xbd
+/* enum: True to trigger conntrack from first action rule lookup (AR=>CT=>AR
+ * sequence).
+ */
+#define TABLE_FIELD_ID_DO_CT 0xc8
+/* enum: True to perform NAT using parameters from conntrack lookup response.
+ */
+#define TABLE_FIELD_ID_DO_NAT 0xc9
+/* enum: True to trigger recirculated action rule lookup (AR=>AR sequence). */
+#define TABLE_FIELD_ID_DO_RECIRC 0xca
+/* enum: Next action set payload ID for replay. The null value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_NEXT_ACTION_SET_PAYLOAD 0xcb
+/* enum: Next action set row ID for replay. The null value is all-1-bits. */
+#define TABLE_FIELD_ID_NEXT_ACTION_SET_ROW 0xcc
+/* enum: Action set payload ID for additional delivery to management CPU. The
+ * null value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_MC_ACTION_SET_PAYLOAD 0xcd
+/* enum: Action set row ID for additional delivery to management CPU. The null
+ * value is all-1-bits.
+ */
+#define TABLE_FIELD_ID_MC_ACTION_SET_ROW 0xce
+/* enum: True to include layer 4 in LACP hash on delivery to a LAG m-port. */
+#define TABLE_FIELD_ID_LACP_INC_L4 0xdc
+/* enum: True to request that LACP is performed by a plugin. */
+#define TABLE_FIELD_ID_LACP_PLUGIN 0xdd
+/* enum: LACP_Balance_Table base address divided by 64. */
+#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
+/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
+#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
+ * other encapsulation types.
+ */
+#define TABLE_FIELD_ID_UDP_PORT 0xe6
+/* enum: True to perform RSS based on outer fields rather than inner fields. */
+#define TABLE_FIELD_ID_RSS_ON_OUTER 0xe7
+/* enum: True to perform steering table lookup on outer fields rather than
+ * inner fields.
+ */
+#define TABLE_FIELD_ID_STEER_ON_OUTER 0xe8
+/* enum: Destination queue ID for host delivery. */
+#define TABLE_FIELD_ID_DST_QID 0xf0
+/* enum: True to drop this packet. */
+#define TABLE_FIELD_ID_DROP 0xf1
+/* enum: True to strip outer VLAN tag from this packet. */
+#define TABLE_FIELD_ID_VLAN_STRIP 0xf2
+/* enum: True to override the user mark field with the supplied USER_MARK, or
+ * false to bitwise-OR the USER_MARK into it.
+ */
+#define TABLE_FIELD_ID_MARK_OVERRIDE 0xf3
+/* enum: True to override the user flag field with the supplied USER_FLAG, or
+ * false to bitwise-OR the USER_FLAG into it.
+ */
+#define TABLE_FIELD_ID_FLAG_OVERRIDE 0xf4
+/* enum: RSS context ID, indexing the RSS_Context_Table. */
+#define TABLE_FIELD_ID_RSS_CTX_ID 0xfa
+/* enum: True to enable RSS. */
+#define TABLE_FIELD_ID_RSS_EN 0xfb
+/* enum: Toeplitz hash key. */
+#define TABLE_FIELD_ID_KEY 0xfc
+/* enum: Key mode for IPv4 TCP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_TCP_V4_KEY_MODE 0xfd
+/* enum: Key mode for IPv6 TCP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_TCP_V6_KEY_MODE 0xfe
+/* enum: Key mode for IPv4 UDP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_UDP_V4_KEY_MODE 0xff
+/* enum: Key mode for IPv6 UDP packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_UDP_V6_KEY_MODE 0x100
+/* enum: Key mode for other IPv4 packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_OTHER_V4_KEY_MODE 0x101
+/* enum: Key mode for other IPv6 packets - see TABLE_RSS_KEY_MODE enum. */
+#define TABLE_FIELD_ID_OTHER_V6_KEY_MODE 0x102
+/* enum: Spreading mode - 0=>indirection; 1=>even. */
+#define TABLE_FIELD_ID_SPREAD_MODE 0x103
+/* enum: For indirection spreading mode, the base address of a region within
+ * the Indirection_Table. For even spreading mode, the number of queues to
+ * spread across (only values 1-255 are valid for this mode).
+ */
+#define TABLE_FIELD_ID_INDIR_TBL_BASE 0x104
+/* enum: For indirection spreading mode, identifies the length of a region
+ * within the Indirection_Table, where length = 32 << len_id. Must be set to 0
+ * for even spreading mode.
+ */
+#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
+/* enum: An offset to be applied to the base destination queue ID. */
+#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
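A minimal sketch (hypothetical helpers) of how the spreading fields above relate: the region length is 32 << len_id in indirection mode, while even mode reuses INDIR_TBL_BASE as a queue count and requires len_id to be 0:

#include <stdbool.h>
#include <stdint.h>

/* Entries in the Indirection_Table region selected by INDIR_TBL_LEN_ID:
 * 32 << len_id (0 => 32, 1 => 64, 2 => 128, ...).
 */
static uint32_t table_indir_region_len(uint32_t len_id)
{
	return 32u << len_id;
}

/* Sanity-check a spreading configuration against the rules above. */
static bool table_rss_spread_valid(uint32_t spread_mode, uint32_t base,
				   uint32_t len_id)
{
	if (spread_mode == TABLE_RSS_SPREAD_MODE_EVEN)
		return len_id == 0 && base >= 1 && base <= 255;
	/* Indirection mode: base and len_id select a region of the table. */
	return spread_mode == TABLE_RSS_SPREAD_MODE_INDIRECTION;
}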
+
+/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
+ * platforms
+ */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
@@ -447,17 +1164,21 @@
#define MCDI_EVENT_TX_ERR_TYPE_OFST 0
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-/* enum: Descriptor loader reported failure */
+/* enum: Descriptor loader reported failure. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
-/* enum: Descriptor ring empty and no EOP seen for packet */
+/* enum: Descriptor ring empty and no EOP seen for packet. Specific to
+ * EF10-family NICs.
+ */
#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
-/* enum: Overlength packet */
+/* enum: Overlength packet. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_2BIG 0x3
-/* enum: Malformed option descriptor */
+/* enum: Malformed option descriptor. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
-/* enum: Option descriptor part way through a packet */
+/* enum: Option descriptor part way through a packet. Specific to EF10-family
+ * NICs.
+ */
#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
-/* enum: DMA or PIO data access error */
+/* enum: DMA or PIO data access error. Specific to EF10-family NICs. */
#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_OFST 0
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
@@ -773,6 +1494,12 @@
* SF-122927-TC for details.
*/
#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
+/* enum: Notification that the mport journal has changed since it was last read
+ * and updates can be read using the MC_CMD_MAE_MPORT_READ_JOURNAL command. The
+ * firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1070,7 +1797,13 @@
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
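The _LO/_HI dword pairs added in this and the following hunks carry 64-bit values split into two little-endian dwords; a minimal sketch (hypothetical helper, assuming glibc-style le32toh()) of reassembling one:

#include <endian.h>
#include <stdint.h>
#include <string.h>

static uint64_t mcdi_get_u64(const uint8_t *buf, unsigned int lo_ofst,
			     unsigned int hi_ofst)
{
	uint32_t lo, hi;

	memcpy(&lo, buf + lo_ofst, sizeof(lo));
	memcpy(&hi, buf + hi_ofst, sizeof(hi));
	return (uint64_t)le32toh(hi) << 32 | le32toh(lo);
}

/* e.g. the first PPS timestamp in an FCDI extended event:
 *   mcdi_get_u64(ev, FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST,
 *		  FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST);
 */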
@@ -1482,12 +2215,24 @@
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LBN 2080
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_WIDTH 32
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LBN 2112
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_WIDTH 32
/* MC firmware version number */
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LBN 2144
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_WIDTH 32
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LBN 2176
+#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_WIDTH 32
/* MC firmware security level */
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
@@ -1571,7 +2316,13 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_WIDTH 32
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
@@ -1587,7 +2338,13 @@
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_WIDTH 32
/* extra info */
#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
@@ -1611,7 +2368,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_WIDTH 32
/* extra info */
#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
@@ -1633,6 +2396,33 @@
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
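A sketch (hypothetical helpers; the sfc driver has its own MCDI accessor macros) of testing one of the new *_PRESENT flags from the FLAGS dword using the _OFST/_LBN/_WIDTH triplets, assuming _LBN is relative to the dword at _OFST as for other MCDI bitfields:

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Extract a bitfield from the little-endian dword at byte offset ofst
 * (width < 32 assumed).
 */
static uint32_t mcdi_get_bits(const uint8_t *buf, unsigned int ofst,
			      unsigned int lbn, unsigned int width)
{
	uint32_t dword;

	memcpy(&dword, buf + ofst, sizeof(dword));
	return (le32toh(dword) >> lbn) & ((1u << width) - 1);
}

/* e.g. does a GET_VERSION_V2 response carry a board version? */
static bool mcdi_v2_has_board_version(const uint8_t *outbuf)
{
	return mcdi_get_bits(outbuf,
			     MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_OFST,
			     MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_LBN,
			     MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_WIDTH);
}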
/* MC firmware unique build ID (as binary SHA-1 value) */
#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
@@ -1650,7 +2440,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
/* The ID of the SUC chip. This is specific to the platform but typically
* indicates family, memory sizes etc. See SF-116728-SW for further details.
*/
@@ -1664,7 +2460,13 @@
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
/* FPGA version as three numbers. On Riverhead based systems this field uses
* the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
* FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
@@ -1686,6 +2488,489 @@
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
+/* MC_CMD_GET_VERSION_V3_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter).
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V3_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V3_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_NUM 3
+
+/* MC_CMD_GET_VERSION_V4_OUT msgresponse: Extended response providing SoC
+ * version information
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V4_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V4_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x}
+ * FPGA_VERSION[1]: Revision letter (0 => A, 1 => B, ...)
+ * FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_NUM 3
+/* The SOC boot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_OFST 328
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_NUM 4
+/* The SOC uboot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_OFST 344
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_NUM 4
+/* The SOC main rootfs version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4
+/* The SOC recovery buildroot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4
+
+/* MC_CMD_GET_VERSION_V5_OUT msgresponse: Extended response providing bundle
+ * and board version information
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_V5_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_V5_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LBN 192
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_OFST 28
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LBN 224
+#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_WIDTH 32
+/* extra info */
+#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_LBN 11
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_LBN 12
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_WIDTH 1
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_OFST 48
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN 13
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_OFST 52
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_OFST 76
+#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_OFST 140
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_OFST 156
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LBN 1248
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LBN 1280
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_OFST 164
+#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_OFST 168
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_OFST 184
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LEN 8
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LBN 1472
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LBN 1504
+#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x}
+ * FPGA_VERSION[1]: Revision letter (0 => A, 1 => B, ...)
+ * FPGA_VERSION[2]: Sub-revision number
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_OFST 192
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_OFST 204
+#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_OFST 220
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_OFST 236
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_OFST 240
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_LEN 64
+/* The version of the datapath hardware design as three numbers - a.b.c */
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_OFST 304
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_NUM 3
+/* The version of the firmware library used to control the datapath as three
+ * numbers - a.b.c
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_OFST 316
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_NUM 3
+/* The SOC boot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_OFST 328
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_NUM 4
+/* The SOC uboot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_OFST 344
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_NUM 4
+/* The SOC main rootfs version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4
+/* The SOC recovery buildroot version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4
+/* Board version as four numbers - a.b.c.d. BOARD_VERSION[0] duplicates the
+ * BOARD_REVISION field
+ */
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_OFST 392
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_NUM 4
+/* Bundle version as four numbers - a.b.c.d */
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_OFST 408
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_LEN 4
+#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_NUM 4
+
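The *_PRESENT bits in FLAGS gate every optional field above, so a consumer checks the flag before trusting the field. A minimal sketch in C (get_le32() and the raw response buffer handling are assumptions for illustration, not defined by this header):

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian 32-bit MCDI field at a byte offset. */
static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Print the bundle version a.b.c.d from a GET_VERSION_V5 response, but only
 * if the firmware set the matching *_PRESENT flag in the FLAGS word.
 */
static void print_bundle_version(const uint8_t *resp)
{
	uint32_t flags = get_le32(resp, MC_CMD_GET_VERSION_V5_OUT_FLAGS_OFST);
	unsigned int i;

	if (!(flags & (1u << MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN)))
		return;

	for (i = 0; i < MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_NUM; i++)
		printf("%u%c",
		       get_le32(resp,
				MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_OFST +
				i * MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_LEN),
		       i + 1 < MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_NUM ? '.' : '\n');
}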
/***********************************/
/* MC_CMD_PTP
@@ -1789,7 +3074,9 @@
#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
-/* Not used. Events are always sent to function relative queue 0. */
+/* Not used, initialize to 0. Events are always sent to function relative queue
+ * 0.
+ */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
/* PTP timestamping mode. Not used from Huntington onwards. */
@@ -1866,7 +3153,13 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_WIDTH 32
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
@@ -1897,7 +3190,13 @@
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_WIDTH 32
/* enum: Number of fractional bits in frequency adjustment */
/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
@@ -1936,7 +3235,13 @@
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LEN 4
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LBN 96
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_WIDTH 32
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LEN 4
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LBN 128
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_WIDTH 32
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
@@ -2052,7 +3357,13 @@
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LEN 4
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LBN 64
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_WIDTH 32
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LEN 4
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LBN 96
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
@@ -2083,7 +3394,13 @@
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LBN 96
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_WIDTH 32
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LBN 128
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_WIDTH 32
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
@@ -2130,7 +3447,9 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Not used. Events are always sent to function relative queue 0. */
+/* Not used, initialize to 0. Events are always sent to function relative queue
+ * 0.
+ */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
@@ -2492,6 +3811,87 @@
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_LEN 40
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_LEN 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_LEN 4
+/* Minimum supported value for the FREQ field in
+ * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and
+ * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message
+ * response is not supported a value of -0.1 ns should be assumed, which is
+ * equivalent to a -10% adjustment.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_OFST 24
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_OFST 24
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LBN 192
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_WIDTH 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_OFST 28
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LBN 224
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_WIDTH 32
+/* Maximum supported value for the FREQ field in
+ * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and
+ * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message
+ * response is not supported a value of 0.1 ns should be assumed, which is
+ * equivalent to a +10% adjustment.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_OFST 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_OFST 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LBN 256
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_WIDTH 32
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_OFST 36
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LEN 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LBN 288
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_WIDTH 32
+
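A worked example of the TIME_FORMAT encodings above: in SECONDS_27FRACTION format the minor word counts 2^-27 s ticks, so nanoseconds = minor * 10^9 / 2^27. A sketch only; real code picks the conversion from the TIME_FORMAT value actually returned:

#include <stdint.h>

/* Convert a SECONDS_27FRACTION minor word to nanoseconds. */
static uint32_t s27_minor_to_ns(uint32_t minor)
{
	return (uint32_t)(((uint64_t)minor * 1000000000u) >> 27);
}

/* e.g. minor == 1u << 26 (half a second) gives 500000000 ns. */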
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
@@ -2634,7 +4034,13 @@
#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
/* The requested update interval, in seconds. (Or the sub-command if ADDR is
* NULL.)
*/
@@ -3039,7 +4445,13 @@
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
@@ -3643,6 +5055,8 @@
#define MC_CMD_MEDIA_BASE_T 0x6
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+/* enum: DSFP. */
+#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
@@ -3912,7 +5326,13 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -3995,28 +5415,52 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
@@ -4028,7 +5472,13 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -4111,49 +5561,91 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 50G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_WIDTH 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
/* Enum values, see field(s): */
/* 100M */
@@ -4524,7 +6016,13 @@
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_IN_ADDR_LO_WIDTH 32
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_IN_ADDR_HI_WIDTH 32
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
@@ -4565,7 +6063,13 @@
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_WIDTH 32
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_WIDTH 32
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
@@ -4616,6 +6120,129 @@
#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+/* MC_CMD_SET_MAC_V3_IN msgrequest */
+#define MC_CMD_SET_MAC_V3_IN_LEN 40
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_V3_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_V3_IN_MTU_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_V3_IN_DRAIN_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LBN 64
+#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LBN 96
+#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_OFST 16
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_V3_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_V3_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_OFST 24
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_V3_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CONTROL_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
+/* Identifies the MAC to update by specifying the end of a logical MAE
+ * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
+ * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
+ * circumstances. 1. Some will always work (e.g. a VF can always address its
+ * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
+ * meaningful and will always fail with EINVAL (e.g. attempting to address the
+ * VNIC end of a link to a physical port), 3. Some are meaningful but require
+ * the MCDI client to have the required permission and fail with EPERM
+ * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
+ * and 4. Some could be implementation-specific and fail with ENOTSUP if not
+ * available (no examples exist right now). See SF-123581-TC section 4.3 for
+ * more details.
+ */
+#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
+#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 35
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 276
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 272
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 34
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_OFST 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LBN 256
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_OFST 36
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LBN 288
+#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_WIDTH 32
+
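Per the CONTROL comment above, a parameter is only touched when its CFG_* bit is set, so a request can update a single field. A hedged sketch of building a SET_MAC_V3 request that changes only the MTU (put_le32() is an assumed helper; TARGET handling is elided):

#include <stdint.h>
#include <string.h>

/* Write a little-endian 32-bit MCDI field into a request buffer. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val & 0xff);
	buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
}

/* Build a request that modifies only the MTU: only CFG_MTU is set in CONTROL,
 * so DRAIN, REJECT, FCNTL and FCS are left untouched by the firmware.
 */
static void build_set_mtu_req(uint8_t req[MC_CMD_SET_MAC_V3_IN_LEN], uint32_t mtu)
{
	memset(req, 0, MC_CMD_SET_MAC_V3_IN_LEN);
	put_le32(req, MC_CMD_SET_MAC_V3_IN_MTU_OFST, mtu);
	put_le32(req, MC_CMD_SET_MAC_V3_IN_CONTROL_OFST,
		 1u << MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN);
	/* TARGET (offset 32) is left zero here purely for brevity; a real
	 * caller would fill in an MPORT selector / link end as described above.
	 */
}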
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -4649,7 +6276,13 @@
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_WIDTH 32
/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
@@ -4731,7 +6364,13 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_OFST 8
@@ -4774,7 +6413,13 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
@@ -4930,7 +6575,13 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
@@ -4963,7 +6614,13 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
@@ -5037,7 +6694,13 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
@@ -5097,7 +6760,13 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
@@ -5108,7 +6777,13 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
@@ -5201,7 +6876,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LBN 64
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_WIDTH 32
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LBN 96
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_WIDTH 32
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
@@ -5706,6 +7387,9 @@
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_LBN 3
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_WIDTH 1
/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
* response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
@@ -6180,7 +7864,13 @@
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LO_LEN 4
+#define MC_CMD_SENSOR_ENTRY_LO_LBN 32
+#define MC_CMD_SENSOR_ENTRY_LO_WIDTH 32
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_HI_LEN 4
+#define MC_CMD_SENSOR_ENTRY_HI_LBN 64
+#define MC_CMD_SENSOR_ENTRY_HI_WIDTH 32
#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
@@ -6202,7 +7892,13 @@
/* MC_CMD_SENSOR_ENTRY_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LO_LEN 4 */
+/* MC_CMD_SENSOR_ENTRY_LO_LBN 32 */
+/* MC_CMD_SENSOR_ENTRY_LO_WIDTH 32 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_HI_LEN 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_LBN 64 */
+/* MC_CMD_SENSOR_ENTRY_HI_WIDTH 32 */
/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
@@ -6259,7 +7955,13 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_WIDTH 32
/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
@@ -6271,7 +7973,13 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_WIDTH 32
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
@@ -6286,7 +7994,13 @@
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_WIDTH 32
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
@@ -6583,11 +8297,16 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
* Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
- * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
- * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
- * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * SFP+ PHYs). The "media type" can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid "page number" input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+, PAGE=0 or 1
* returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
- * Anything else: currently undefined. Locks required: None. Return code: 0.
+ * For QSFP, PAGE=-1 is the lower (unbanked) page. PAGE=2 is the EEPROM and
+ * PAGE=3 is the module limits. For DSFP, module addressing requires a
+ * "BANK:PAGE". Not every bank has the same number of pages. See the Common
+ * Management Interface Specification (CMIS) for further details. A BANK:PAGE
+ * of "0xffff:0xffff" retrieves the lower (unbanked) page. Locks required -
+ * None. Return code - 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
#undef MC_CMD_0x4b_PRIVILEGE_CTG
@@ -6598,6 +8317,12 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
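For DSFP the 32-bit PAGE word carries a BANK:PAGE pair as described above; a small sketch of packing it with the fields just defined:

#include <stdint.h>

/* Pack a DSFP bank/page pair into the PAGE word of a GET_PHY_MEDIA_INFO
 * request. 0xffff/0xffff selects the lower (unbanked) page.
 */
static uint32_t dsfp_media_info_page(uint16_t bank, uint16_t page)
{
	return ((uint32_t)bank << MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN) |
	       ((uint32_t)page << MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN);
}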
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -7404,7 +9129,13 @@
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
@@ -7589,7 +9320,13 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
@@ -7782,7 +9519,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +9564,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +9613,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -7997,7 +9734,13 @@
#define BUFTBL_ENTRY_RAWADDR_OFST 4
#define BUFTBL_ENTRY_RAWADDR_LEN 8
#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
+#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
#define BUFTBL_ENTRY_RAWADDR_LBN 32
#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
@@ -8007,14 +9750,25 @@
#define NVRAM_PARTITION_TYPE_ID_LEN 2
/* enum: Primary MC firmware partition */
#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: NMC firmware partition (this is intentionally an alias of MC_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_FIRMWARE 0x100
/* enum: Secondary MC firmware partition */
#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
/* enum: Expansion ROM partition */
#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
/* enum: Static configuration TLV partition */
#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Factory configuration TLV partition (this is intentionally an alias of
+ * STATIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_FACTORY_CONFIG 0x400
/* enum: Dynamic configuration TLV partition */
#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: User configuration TLV partition (this is intentionally an alias of
+ * DYNAMIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_USER_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
@@ -8027,10 +9781,16 @@
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output partition for NMC firmware (this is
+ * intentionally an alias of LOG)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Crash log partition for NMC firmware */
+#define NVRAM_PARTITION_TYPE_NMC_CRASH_LOG 0x801
/* enum: Application license key storage partition */
#define NVRAM_PARTITION_TYPE_LICENSE 0x900
/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
@@ -8047,6 +9807,22 @@
#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
/* enum: Non-volatile log output partition for FC */
#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: FPGA Stage 1 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_STAGE1 0xb05
+/* enum: FPGA Stage 2 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_STAGE2 0xb06
+/* enum: FPGA User XCLBIN / Programmable Region 0 bitstream */
+#define NVRAM_PARTITION_TYPE_FPGA_REGION0 0xb07
+/* enum: FPGA User XCLBIN (this is intentionally an alias of FPGA_REGION0) */
+#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_USER 0xb07
+/* enum: FPGA jump instruction (a.k.a. boot) partition to select Stage1
+ * bitstream
+ */
+#define NVRAM_PARTITION_TYPE_FPGA_JUMP 0xb08
+/* enum: FPGA Validate XCLBIN */
+#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_VALIDATE 0xb09
+/* enum: FPGA XOCL Configuration information */
+#define NVRAM_PARTITION_TYPE_FPGA_XOCL_CONFIG 0xb0a
/* enum: MUM firmware partition */
#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
/* enum: SUC firmware partition (this is intentionally an alias of
@@ -8055,6 +9831,10 @@
#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: SUC Non-volatile log output partition (this is intentionally an alias
+ * of MUM_LOG).
+ */
+#define NVRAM_PARTITION_TYPE_SUC_LOG 0xc01
/* enum: MUM Application table partition. */
#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
/* enum: MUM boot rom partition. */
@@ -8069,6 +9849,10 @@
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
/* enum: Used by the expansion ROM for logging */
#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Non-volatile log output partition for Expansion ROM (this is
+ * intentionally an alias of PXE_LOG).
+ */
+#define NVRAM_PARTITION_TYPE_EXPROM_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
@@ -8077,6 +9861,10 @@
* between XJTAG and Manftest.
*/
#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Deployment configuration TLV partition (this is intentionally an alias
+ * of MANUFACTURING)
+ */
+#define NVRAM_PARTITION_TYPE_DEPLOYMENT_CONFIG 0x1300
/* enum: Spare partition 4 */
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
@@ -8112,14 +9900,45 @@
#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
+/* enum: Partition to store ASN.1 format Bundle Signature for checking. */
+#define NVRAM_PARTITION_TYPE_BUNDLE_SIGNATURE 0x1e04
+/* enum: Test partition on SmartNIC system microcontroller (SUC) */
+#define NVRAM_PARTITION_TYPE_SUC_TEST 0x1f00
+/* enum: System microcontroller access to primary FPGA flash. */
+#define NVRAM_PARTITION_TYPE_SUC_FPGA_PRIMARY 0x1f01
+/* enum: System microcontroller access to secondary FPGA flash (if present) */
+#define NVRAM_PARTITION_TYPE_SUC_FPGA_SECONDARY 0x1f02
+/* enum: System microcontroller access to primary System-on-Chip flash */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_PRIMARY 0x1f03
+/* enum: System microcontroller access to secondary System-on-Chip flash (if
+ * present)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_SECONDARY 0x1f04
+/* enum: System microcontroller critical failure logs. Contains structured
+ * details of sensors leading up to a critical failure (where the board is shut
+ * down).
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FAILURE_LOG 0x1f05
+/* enum: System-on-Chip configuration information (see XN-200467-PS). */
+#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
+/* enum: System-on-Chip update information. */
+#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
/* enum: Recovery partition map (provided if real map is missing or corrupt) */
#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Recovery Flash Partition Table, see SF-122606-TC. (this is
+ * intentionally an alias of RECOVERY_MAP)
+ */
+#define NVRAM_PARTITION_TYPE_RECOVERY_FPT 0xfffe
/* enum: Partition map (real map as stored in flash) */
#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+/* enum: Flash Partition Table, see SF-122606-TC. (this is intentionally an
+ * alias of PARTITION_MAP)
+ */
+#define NVRAM_PARTITION_TYPE_FPT 0xffff
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
@@ -8168,7 +9987,13 @@
#define LICENSED_FEATURES_MASK_OFST 0
#define LICENSED_FEATURES_MASK_LEN 8
#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_LO_LEN 4
+#define LICENSED_FEATURES_MASK_LO_LBN 0
+#define LICENSED_FEATURES_MASK_LO_WIDTH 32
#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_MASK_HI_LEN 4
+#define LICENSED_FEATURES_MASK_HI_LBN 32
+#define LICENSED_FEATURES_MASK_HI_WIDTH 32
#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
@@ -8208,7 +10033,13 @@
#define LICENSED_V3_APPS_MASK_OFST 0
#define LICENSED_V3_APPS_MASK_LEN 8
#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_LO_LEN 4
+#define LICENSED_V3_APPS_MASK_LO_LBN 0
+#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_MASK_HI_LEN 4
+#define LICENSED_V3_APPS_MASK_HI_LBN 32
+#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
#define LICENSED_V3_APPS_ONLOAD_OFST 0
#define LICENSED_V3_APPS_ONLOAD_LBN 0
#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
@@ -8266,7 +10097,13 @@
#define LICENSED_V3_FEATURES_MASK_OFST 0
#define LICENSED_V3_FEATURES_MASK_LEN 8
#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LO_LEN 4
+#define LICENSED_V3_FEATURES_MASK_LO_LBN 0
+#define LICENSED_V3_FEATURES_MASK_LO_WIDTH 32
#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_MASK_HI_LEN 4
+#define LICENSED_V3_FEATURES_MASK_HI_LBN 32
+#define LICENSED_V3_FEATURES_MASK_HI_WIDTH 32
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
@@ -8421,7 +10258,8 @@
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
@@ -8493,7 +10331,13 @@
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
@@ -8514,7 +10358,8 @@
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
@@ -8611,7 +10456,13 @@
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
@@ -8637,6 +10488,158 @@
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+/* MC_CMD_INIT_EVQ_V3_IN msgrequest: Extended request to specify per-queue
+ * event merge timeouts.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_LEN 556
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V3_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_LEN 4
+/* Flags controlling event queue operation (see FLAG_* bits below). */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Receive event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_RX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_OFST 548
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_LEN 4
+/* Transmit event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_TX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_OFST 552
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_LEN 4
+
+/* MC_CMD_INIT_EVQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V3_OUT_LEN 8
+/* Only valid if FLAG_INTERRUPTING was set */
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
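MC_CMD_INIT_EVQ_V3 differs from V2 only in the two per-queue merge timeouts at offsets 548 and 552. A sketch of filling in just those fields, assuming the rest of the request has been built exactly as for V2; the raw-buffer helper is illustrative, and real drivers would use their own MCDI macros.

/* Sketch: set the per-queue event merge timeouts of an INIT_EVQ_V3 request.
 * They are honoured only when FLAG_RX_MERGE/FLAG_TX_MERGE are set; 0 selects
 * the firmware default.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void example_set_evq_v3_merge_timeouts(u8 *req, u32 rx_ns, u32 tx_ns)
{
	__le32 v;

	v = cpu_to_le32(rx_ns);
	memcpy(req + MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_OFST, &v, sizeof(v));
	v = cpu_to_le32(tx_ns);
	memcpy(req + MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_OFST, &v, sizeof(v));
}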
/* QUEUE_CRC_MODE structuredef */
#define QUEUE_CRC_MODE_LEN 1
#define QUEUE_CRC_MODE_MODE_LBN 0
@@ -8687,7 +10690,8 @@
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
@@ -8728,7 +10732,13 @@
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
@@ -8752,7 +10762,8 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
@@ -8826,8 +10837,16 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
@@ -8849,7 +10868,8 @@
#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
@@ -8923,8 +10943,16 @@
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
@@ -8975,7 +11003,8 @@
#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
@@ -9049,8 +11078,16 @@
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
@@ -9114,7 +11151,8 @@
#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
@@ -9188,8 +11226,16 @@
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
@@ -9285,7 +11331,8 @@
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
@@ -9329,7 +11376,13 @@
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_WIDTH 32
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
@@ -9350,7 +11403,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
@@ -9399,6 +11453,9 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_LBN 17
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
@@ -9409,8 +11466,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
-#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 0
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
/* Flags related to Qbb flow control mode. */
@@ -9507,7 +11570,13 @@
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LBN 32
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_WIDTH 32
#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LBN 64
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_WIDTH 32
/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
@@ -9606,7 +11675,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
@@ -9616,7 +11691,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
@@ -9627,7 +11708,13 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
@@ -9651,7 +11738,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
@@ -9661,7 +11754,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
@@ -9672,7 +11771,13 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
@@ -9788,7 +11893,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
@@ -9844,7 +11955,13 @@
#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
@@ -9888,6 +12005,9 @@
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10000,7 +12120,13 @@
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
@@ -10086,6 +12212,9 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10263,9 +12392,10 @@
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
- * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
- * its rte_flow API. This extension is only useful with the sfc_efx driver
- * included as part of DPDK, used in conjunction with the dpdk datapath
+ * filter actions for EF100. Some of these actions are also supported on EF10,
+ * for Intel's DPDK (Data Plane Development Kit, dpdk.org) via its rte_flow
+ * API. In the latter case, this extension is only useful with the sfc_efx
+ * driver included as part of DPDK, used in conjunction with the dpdk datapath
* firmware variant.
*/
#define MC_CMD_FILTER_OP_V3_IN_LEN 180
@@ -10278,7 +12408,13 @@
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_WIDTH 32
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
@@ -10364,6 +12500,9 @@
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
@@ -10539,11 +12678,42 @@
*/
#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
-/* Set an action for all packets matching this filter. The DPDK driver and dpdk
- * f/w variant use their own specific delivery structures, which are documented
- * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
- * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
- * will cause the filter insertion to fail with ENOTSUP.
+/* Flags controlling mutations of the packet and/or metadata when the filter is
+ * matched. The user_mark and user_flag fields' logic is as follows:
+ *
+ *   if (req.MATCH_BITOR_FLAG == 1)
+ *     user_flag = req.MATCH_SET_FLAG bit_or user_flag;
+ *   else
+ *     user_flag = req.MATCH_SET_FLAG;
+ *
+ *   if (req.MATCH_SET_MARK == 0)
+ *     user_mark = 0;
+ *   else if (req.MATCH_BITOR_MARK == 1)
+ *     user_mark = req.MATCH_SET_MARK bit_or user_mark;
+ *   else
+ *     user_mark = req.MATCH_SET_MARK;
+ *
+ * N.B. These flags overlap with the MATCH_ACTION field, which is deprecated in
+ * favour of this field. For the cases where these flags induce a valid
+ * encoding of the MATCH_ACTION field, the semantics agree.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_WIDTH 1
+/* Deprecated: the overlapping MATCH_ACTION_FLAGS field exposes all of the
+ * functionality of this field in an ABI-backwards-compatible manner, and
+ * should be used instead. Any future extensions should be made to the
+ * MATCH_ACTION_FLAGS field, and not to this field. Set an action for all
+ * packets matching this filter. The DPDK driver and (on EF10) dpdk f/w variant
+ * use their own specific delivery structures, which are documented in the DPDK
+ * Firmware Driver Interface (SF-119419-TC). Requesting anything other than
+ * MATCH_ACTION_NONE on an EF10 NIC running another f/w variant will cause the
+ * filter insertion to fail with ENOTSUP.
*/
#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
@@ -10580,7 +12750,13 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_WIDTH 32
/* enum: guaranteed invalid filter handle (low 32 bits) */
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
/* enum: guaranteed invalid filter handle (high 32 bits) */
@@ -10600,7 +12776,13 @@
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_WIDTH 32
#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_OUT/HANDLE */
@@ -10638,6 +12820,8 @@
* rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+/* enum: read the supported encapsulation types for the VNIC */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -10704,6 +12888,30 @@
#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+/* MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT msgresponse: Returns
+ * the supported encapsulation types for the VNIC
+ */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_LEN 8
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_TYPES is returned */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+
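The response is a single dword bitmask with one bit per supported encapsulation type, at the _LBN positions defined above. A sketch of decoding it from a raw response buffer; the function name is illustrative.

/* Sketch: test whether the VNIC supports VXLAN encapsulation from the
 * OP_GET_SUPPORTED_VNIC_ENCAP_TYPES response.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bits.h>
#include <asm/byteorder.h>

static bool example_vnic_supports_vxlan(const u8 *resp)
{
	__le32 raw;

	memcpy(&raw, resp +
	       MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_OFST,
	       sizeof(raw));
	return le32_to_cpu(raw) &
	       BIT(MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN);
}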
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -10849,9 +13057,15 @@
/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
-/* Identifies the port assignment for this function. */
+/* Identifies the port assignment for this function. On EF100, it is possible
+ * for the function to have no network port assigned (either because it is not
+ * yet configured, or because assigning a port to a given function personality
+ * makes no sense - e.g. virtio-blk), in which case the return value is
+ * NULL_PORT.
+ */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+/* enum: Special value to indicate no port is assigned to a function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT 0xffffffff
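Since EF100 functions can legitimately report NULL_PORT, callers should treat it as "no port assigned" rather than as a failure. A trivial sketch, with an illustrative helper name.

/* Sketch: interpret the PORT field of MC_CMD_GET_PORT_ASSIGNMENT_OUT. */
#include <linux/types.h>

static bool example_port_is_assigned(u32 port)
{
	return port != MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT;
}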
/***********************************/
@@ -11009,7 +13223,8 @@
/***********************************/
/* MC_CMD_GET_VI_ALLOC_INFO
* Get information about number of VI's and base VI number allocated to this
- * function.
+ * function. This message is not available to dynamic clients created by
+ * MC_CMD_CLIENT_ALLOC.
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
#undef MC_CMD_0x8d_PRIVILEGE_CTG
@@ -11036,7 +13251,9 @@
/***********************************/
/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ * For CmdClient use. Dump pertinent information on a specific absolute VI. The
+ * VI must be owned by the calling client or one of its ancestors; usership of
+ * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
#undef MC_CMD_0x8e_PRIVILEGE_CTG
@@ -11050,7 +13267,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
/* The PF part of the function owning this VI. */
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
@@ -11073,12 +13290,24 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
/* Raw evq timer table data. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
@@ -11095,22 +13324,46 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
/* TXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
@@ -11130,22 +13383,46 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
/* RXDPCPU raw table data for queue. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
/* Reserved, currently 0. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
@@ -11158,6 +13435,9 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+/* Current user, as assigned by MC_CMD_SET_VI_USER. */
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
/***********************************/
@@ -11200,7 +13480,9 @@
/***********************************/
/* MC_CMD_GET_VI_TLP_PROCESSING
- * Get TLP steering and ordering information for a VI.
+ * Get TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
#undef MC_CMD_0xb0_PRIVILEGE_CTG
@@ -11239,7 +13521,9 @@
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
- * Set TLP steering and ordering information for a VI.
+ * Set TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
@@ -14497,6 +16781,24 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
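Each of these capability flags is described by a byte offset (_OFST), a bit number within that dword (_LBN) and a bit count (_WIDTH). A minimal sketch of how a raw GET_CAPABILITIES response might be tested for one of the new V7 bits, assuming the defines above are in scope; the response buffer name (outbuf) and its length (outlen) are placeholders, not part of this header.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Test a single-bit capability flag in a little-endian MCDI response.
 * Returns false if the response is too short to contain the flag word,
 * which is how fields absent on older firmware are detected.
 */
static bool mcdi_flag_set(const uint8_t *outbuf, size_t outlen,
			  unsigned int ofst, unsigned int lbn)
{
	uint32_t dword;

	if (outlen < ofst + 4)
		return false;
	dword = (uint32_t)outbuf[ofst] |
		((uint32_t)outbuf[ofst + 1] << 8) |
		((uint32_t)outbuf[ofst + 2] << 16) |
		((uint32_t)outbuf[ofst + 3] << 24);
	return (dword >> lbn) & 1u;
}

/* For example, EXTERNAL_MAE_SUPPORTED is bit 9 of the dword at byte 148:
 *   mcdi_flag_set(outbuf, outlen,
 *                 MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST,
 *                 MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN);
 */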
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -14983,6 +17285,24 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -14990,7 +17310,13 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_WIDTH 32
#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_WIDTH 32
/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
@@ -15477,6 +17803,24 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -15484,7 +17828,13 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_WIDTH 32
#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_WIDTH 32
/* The minimum size (in table entries) of indirection table to be allocated
* from the pool for an RSS context. Note that the table size used must be a
* power of 2.
@@ -15521,6 +17871,573 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V10_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LEN 192
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; the
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
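A sketch of how a driver might walk this per-PF array, taking the special values above into account. The defines are assumed to be in scope; caps is a placeholder for a pointer to the start of the V10 response.

#include <stdint.h>
#include <stdio.h>

static void show_pf_port_map(const uint8_t *caps)
{
	unsigned int pf;

	for (pf = 0; pf < MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM; pf++) {
		uint8_t port = caps[MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];

		switch (port) {
		case MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED:
			printf("PF%u: no access\n", pf);
			break;
		case MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT:
			printf("PF%u: not present\n", pf);
			break;
		case MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED:
		case MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT:
			/* INCOMPATIBLE_ASSIGNMENT is treated as "not assigned"
			 * by current drivers, per the comment above.
			 */
			printf("PF%u: not assigned\n", pf);
			break;
		default:
			printf("PF%u: external port %u\n", pf, (unsigned int)port);
			break;
		}
	}
}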
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_LEN 1
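A one-line sketch of the log2 expansion described above; the range guard is only there because the sketch takes an untrusted byte.

#include <stddef.h>
#include <stdint.h>

/* Expand a descriptor cache size field from its binary-logarithm encoding,
 * e.g. desc_cache_entries(caps[MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST]),
 * where caps is a placeholder pointer to the start of the response.
 */
static size_t desc_cache_entries(uint8_t log2_size)
{
	return log2_size < 32 ? (size_t)1 << log2_size : 0;
}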
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K 0x2
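A sketch translating the window-mode enum into the per-VI BAR stride implied by the comments above; a driver would then find VI n's registers at vi_base + n * stride (vi_base being a placeholder name, not part of this header).

#include <stddef.h>
#include <stdint.h>

static size_t vi_window_stride(uint8_t mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K:
		return 8 * 1024;	/* PIO at +4k, no CTPIO mapping */
	case MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K:
		return 16 * 1024;	/* PIO at +4k, CTPIO at +12k */
	case MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K:
		return 64 * 1024;	/* PIO at +4k, CTPIO at +12k */
	default:
		return 0;		/* unknown mode - treat as unsupported */
	}
}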
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_LEN 2
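A sketch of the buffer-sizing rule described above: one 64-bit value per entry, GENERATION_END included. num_stats would be the 16-bit value read from MAC_STATS_NUM_STATS in the response.

#include <stddef.h>
#include <stdint.h>

static size_t mac_stats_dma_len(uint16_t num_stats)
{
	/* A shorter buffer is legal, but the returned stats array will be
	 * truncated to fit it.
	 */
	return (size_t)num_stats * sizeof(uint64_t);
}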
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
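A sketch of checking a requested RX buffer size against this array, following the semantics above: nonzero entries are guaranteed sizes, and an empty list means no restriction. caps is a placeholder pointer to the start of the response; entries are 4-byte little-endian values.

#include <stdbool.h>
#include <stdint.h>

static bool rx_buf_size_guaranteed(const uint8_t *caps, uint32_t want)
{
	bool any_listed = false;
	unsigned int i;

	for (i = 0; i < MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
		unsigned int ofst =
			MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST +
			i * MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN;
		uint32_t size = (uint32_t)caps[ofst] |
				((uint32_t)caps[ofst + 1] << 8) |
				((uint32_t)caps[ofst + 2] << 16) |
				((uint32_t)caps[ofst + 3] << 24);

		if (!size)
			continue;	/* zero entries carry no guarantee */
		any_listed = true;
		if (size == want)
			return true;
	}
	/* An empty list means no limitation on concurrent buffer sizes. */
	return !any_listed;
}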
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+
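A sketch of testing a candidate queue size against the SUPPORTED / GUARANTEED bitmaps described above: a size qualifies only if it is a power of two and its log2 bit is set. bitmap is a placeholder for the 32-bit word read from the response.

#include <stdbool.h>
#include <stdint.h>

static bool queue_size_in_bitmap(uint32_t bitmap, uint32_t entries)
{
	unsigned int log2_entries = 0;

	if (!entries || (entries & (entries - 1)))
		return false;		/* not a power of two */
	while ((1u << log2_entries) < entries)
		log2_entries++;
	return log2_entries < 32 && (bitmap & (1u << log2_entries));
}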
/***********************************/
/* MC_CMD_V2_EXTN
@@ -15729,7 +18646,7 @@
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
-/* Function Local Instance (VI) number. */
+/* Function Local Instance (VI) number which has a TxQ allocated to it. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
@@ -17303,7 +20220,13 @@
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
@@ -17503,6 +20426,18 @@
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+/* MC_CMD_GET_FUNCTION_INFO_OUT_V2 msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN 12
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_LEN 4
+/* Values from PCIE_INTERFACE enumeration. For NICs with a single interface, or
+ * in the case of a V1 response, this should be HOST_PRIMARY.
+ */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_OFST 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_LEN 4
+
/***********************************/
/* MC_CMD_ENABLE_OFFLINE_BIST
@@ -18570,7 +21505,13 @@
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LBN 192
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LBN 224
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_WIDTH 32
/* reserved for future use */
#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
@@ -18578,7 +21519,13 @@
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LBN 448
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LBN 480
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_WIDTH 32
/* reserved for future use */
#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
@@ -18681,7 +21628,13 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
@@ -18713,7 +21666,13 @@
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
@@ -18721,7 +21680,13 @@
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
/***********************************/
@@ -18826,7 +21791,13 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
@@ -18876,7 +21847,13 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
@@ -18956,7 +21933,13 @@
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
/***********************************/
@@ -19322,7 +22305,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
@@ -19490,6 +22473,22 @@
* SF-117064-DG for background).
*/
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Control the Match-Action Engine if present. See mcdi_mae.yml. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE 0x10000
+/* enum: This Function/client may call MC_CMD_CLIENT_ALLOC to create new
+ * dynamic client children of itself.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALLOC_CLIENT 0x20000
+/* enum: A dynamic client with this privilege may perform all the same DMA
+ * operations as the function client from which it is descended.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_FUNC_DMA 0x40000
+/* enum: A client with this privilege may perform DMA as any PCIe function on
+ * the device and to on-device DDR. It allows clients to use TX-DESC2CMPT-DESC
+ * descriptors, and to use TX-SEG-DESC and TX-MEM2MEM-DESC with an address
+ * space override (i.e. with the ADDR_SPC_EN bit set).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ARBITRARY_DMA 0x80000
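The GRP_* values are single-bit flags within the 32-bit privilege mask carried by MC_CMD_PRIVILEGE_MASK. A sketch of testing the new groups, where mask is a placeholder for the mask word read from the response.

#include <stdbool.h>
#include <stdint.h>

static bool can_control_mae(uint32_t mask)
{
	return mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE;
}

static bool can_alloc_dynamic_clients(uint32_t mask)
{
	return mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_ALLOC_CLIENT;
}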
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -20277,7 +23276,8 @@
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index.
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
@@ -20499,7 +23499,13 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
+#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
@@ -20521,6 +23527,9 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
+#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
@@ -20530,6 +23539,12 @@
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
+#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
@@ -20575,9 +23590,12 @@
#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
+#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
@@ -20814,6 +23832,21 @@
#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
+ * requests of the device and that can own resources managed by the device.
+ * Examples of clients include PCIe functions and dynamic clients. A client
+ * handle is a 32b opaque value used to refer to a client. Further details can
+ * be found within XN-200418-TC.
+ */
+#define CLIENT_HANDLE_LEN 4
+#define CLIENT_HANDLE_OPAQUE_OFST 0
+#define CLIENT_HANDLE_OPAQUE_LEN 4
+/* enum: A client handle guaranteed never to refer to a real client. */
+#define CLIENT_HANDLE_NULL 0xffffffff
+/* enum: Used to refer to the calling client. */
+#define CLIENT_HANDLE_SELF 0xfffffffe
+#define CLIENT_HANDLE_OPAQUE_LBN 0
+#define CLIENT_HANDLE_OPAQUE_WIDTH 32
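A sketch of how the two reserved client handle values defined above might be treated; handle is the 32-bit opaque value as carried in MCDI messages.

#include <stdbool.h>
#include <stdint.h>

static bool client_handle_is_null(uint32_t handle)
{
	return handle == CLIENT_HANDLE_NULL;	/* never refers to a real client */
}

static bool client_handle_is_self(uint32_t handle)
{
	return handle == CLIENT_HANDLE_SELF;	/* shorthand for the calling client */
}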
/* CLOCK_INFO structuredef: Information about a single hardware clock */
#define CLOCK_INFO_LEN 28
@@ -20848,7 +23881,13 @@
#define CLOCK_INFO_FREQUENCY_OFST 4
#define CLOCK_INFO_FREQUENCY_LEN 8
#define CLOCK_INFO_FREQUENCY_LO_OFST 4
+#define CLOCK_INFO_FREQUENCY_LO_LEN 4
+#define CLOCK_INFO_FREQUENCY_LO_LBN 32
+#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
#define CLOCK_INFO_FREQUENCY_HI_OFST 8
+#define CLOCK_INFO_FREQUENCY_HI_LEN 4
+#define CLOCK_INFO_FREQUENCY_HI_LBN 64
+#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
#define CLOCK_INFO_FREQUENCY_LBN 32
#define CLOCK_INFO_FREQUENCY_WIDTH 64
/* Human-readable ASCII name for clock, with NUL termination */
@@ -20858,6 +23897,57 @@
#define CLOCK_INFO_NAME_LBN 96
#define CLOCK_INFO_NAME_WIDTH 8
+/* SCHED_CREDIT_CHECK_RESULT structuredef */
+#define SCHED_CREDIT_CHECK_RESULT_LEN 16
+/* The instance of the scheduler. Refer to XN-200389-AW for the location of
+ * these schedulers in the hardware.
+ */
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_A 0x0 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_A 0x1 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_B 0x2 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_C 0x3 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_TX 0x4 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_D 0x5 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_REPLAY 0x6 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
+#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
+/* The type of node that this result refers to. */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_OFST 1
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LEN 1
+/* enum: Destination node */
+#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
+/* enum: Source node */
+#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
+#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
+/* Level of node in scheduler hierarchy (level 0 is the bottom of the
+ * hierarchy, increasing towards the root node).
+ */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_OFST 2
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LEN 2
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LBN 16
+#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_WIDTH 16
+/* Node index */
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_OFST 4
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LBN 32
+#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_WIDTH 32
+/* The number of credits the node is expected to have. */
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_OFST 8
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LBN 64
+#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_WIDTH 32
+/* The number of credits the node actually had. */
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_OFST 12
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LBN 96
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_WIDTH 32
+
/***********************************/
/* MC_CMD_GET_CLOCKS_INFO
@@ -20887,7 +23977,19 @@
/***********************************/
/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full.
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
*/
#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
#undef MC_CMD_0x16d_PRIVILEGE_CTG
@@ -20951,6 +24053,12 @@
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
@@ -20967,7 +24075,9 @@
/***********************************/
/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ * Remove a VNIC encapsulation rule. Packets which would have previously
+ * matched the rule will then be considered as unencapsulated. Returns EALREADY
+ * if the input HANDLE doesn't correspond to an existing rule.
*/
#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
#undef MC_CMD_0x16e_PRIVILEGE_CTG
@@ -20983,6 +24093,964 @@
/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
+ * the endianness specified by the RFC; users should ignore the broken-out
+ * fields and instead do straight memory copies to ensure correct ordering.
+ */
+#define UUID_LEN 16
+#define UUID_TIME_LOW_OFST 0
+#define UUID_TIME_LOW_LEN 4
+#define UUID_TIME_LOW_LBN 0
+#define UUID_TIME_LOW_WIDTH 32
+#define UUID_TIME_MID_OFST 4
+#define UUID_TIME_MID_LEN 2
+#define UUID_TIME_MID_LBN 32
+#define UUID_TIME_MID_WIDTH 16
+#define UUID_TIME_HI_LBN 52
+#define UUID_TIME_HI_WIDTH 12
+#define UUID_VERSION_LBN 48
+#define UUID_VERSION_WIDTH 4
+#define UUID_RESERVED_LBN 64
+#define UUID_RESERVED_WIDTH 2
+#define UUID_CLK_SEQ_LBN 66
+#define UUID_CLK_SEQ_WIDTH 14
+#define UUID_NODE_OFST 10
+#define UUID_NODE_LEN 6
+#define UUID_NODE_LBN 80
+#define UUID_NODE_WIDTH 48
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_ALLOC
+ * Create a handle to a datapath plugin's extension. This involves finding a
+ * currently-loaded plugin offering the given functionality (as identified by
+ * the UUID) and allocating a handle to track the usage of it. Plugin
+ * functionality is identified by 'extension' rather than any other identifier
+ * so that a single plugin bitfile may offer more than one piece of independent
+ * functionality. If two bitfiles are loaded which both offer the same
+ * extension, then the metadata is interrogated further to determine which is
+ * the newest and that is the one opened. See SF-123625-SW for architectural
+ * detail on datapath plugins.
+ */
+#define MC_CMD_PLUGIN_ALLOC 0x1ad
+#undef MC_CMD_0x1ad_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
+#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
+/* The functionality requested of the plugin, as a UUID structure */
+#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
+#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
+/* Additional options for opening the handle */
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
+#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
+/* Load the extension only if it is in the specified administrative group.
+ * Specify ANY to load the extension wherever it is found (if there are
+ * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
+ * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
+ * administrative groups.
+ */
+#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
+#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
+/* enum: Load the extension from any ADMIN_GROUP. */
+#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
+/* Reserved */
+#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
+#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
+
+/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
+#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
+/* Unique identifier of this usage */
+#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
+
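+/* Illustrative sketch, not part of the MCDI protocol definitions: per the
+ * UUID structuredef above, extension UUIDs should be copied verbatim rather
+ * than assembled from the broken-out fields. Assuming <stdint.h> and
+ * <string.h> and a caller-provided request buffer of at least
+ * MC_CMD_PLUGIN_ALLOC_IN_LEN bytes, that copy might look like:
+ *
+ *   // Place a 16-byte extension UUID into an MC_CMD_PLUGIN_ALLOC request.
+ *   void plugin_alloc_set_uuid(uint8_t *inbuf, const uint8_t uuid[UUID_LEN])
+ *   {
+ *           memcpy(inbuf + MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST, uuid,
+ *                  MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN);
+ *   }
+ */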
+
+/***********************************/
+/* MC_CMD_PLUGIN_FREE
+ * Delete a handle to a plugin's extension.
+ */
+#define MC_CMD_PLUGIN_FREE 0x1ae
+#undef MC_CMD_0x1ae_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_FREE_IN msgrequest */
+#define MC_CMD_PLUGIN_FREE_IN_LEN 4
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
+
+/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
+#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_GLOBAL
+ * Returns the global metadata applying to the whole plugin extension. See the
+ * other metadata calls for subtypes of data.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
+#undef MC_CMD_0x1af_PRIVILEGE_CTG
+
+#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
+/* Unique identifier of this plugin extension. This is identical to the value
+ * which was requested when the handle was allocated.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
+/* semver sub-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
+/* semver micro-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
+/* Number of different messages which can be sent to this extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
+/* Byte offset within the VI window of the plugin's mapped CSR window. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
+/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
+ * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
+ * MAPPED_CSR_FLAGS are ignored).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
+/* Flags indicating how to perform the CSR window mapping. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
+/* Identifier of the set of extensions which all change state together.
+ * Extensions having the same ADMIN_GROUP will always load and unload at the
+ * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
+ * generation number as an implementation detail to ensure that they're not
+ * reused rapidly).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
+/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
+ * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
+ * access to this extension.
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
+/* Reserved */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER
+ * Returns metadata supplied by the plugin author which describes this
+ * extension in a human-readable way. Contrast with
+ * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
+ * to operate.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
+#undef MC_CMD_0x1b0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
+/* Category of data to return */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
+/* enum: Top-level information about the extension. The returned data is an
+ * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
+ * the extension. The data is a back-to-back list of zero-terminated strings;
+ * the even-numbered fields (0,2,4,...) are keys and their following odd-
+ * numbered fields are the corresponding values. Both keys and values are
+ * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
+ * times. Note that all information (including the key/value structure itself
+ * and the UTF-8 encoding) may have been provided by the plugin author, so
+ * callers must be cautious about parsing it. Callers should parse only the
+ * top-level structure to separate out the keys and values; the contents of the
+ * values are not expected to be machine-readable.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
+/* Byte position of the data to be returned within the full data block of the
+ * given SUBTYPE.
+ */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Full length of the data block of the requested SUBTYPE, in bytes. */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
+/* The information requested by SUBTYPE. */
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
+#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
+
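+/* Illustrative sketch, not part of the MCDI protocol definitions: once a
+ * caller has reassembled all TOTAL_SIZE bytes of EXTENSION_KVS data (possibly
+ * across several OFFSET-based requests), the key/value pairs can be walked
+ * defensively along these lines. Assumes <string.h>, <stdio.h> and
+ * <stddef.h>; the printf() stands in for whatever the caller does with each
+ * pair.
+ *
+ *   void walk_extension_kvs(const char *data, size_t total_size)
+ *   {
+ *           size_t pos = 0;
+ *
+ *           while (pos < total_size) {
+ *                   const char *key = data + pos;
+ *                   size_t key_len = strnlen(key, total_size - pos);
+ *                   const char *value;
+ *
+ *                   if (key_len == total_size - pos)
+ *                           break;          // unterminated key: stop parsing
+ *                   pos += key_len + 1;
+ *                   value = data + pos;
+ *                   if (strnlen(value, total_size - pos) == total_size - pos)
+ *                           break;          // key without a value: stop
+ *                   printf("%s = %s\n", key, value);
+ *                   pos += strlen(value) + 1;
+ *           }
+ *   }
+ */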
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_META_MSG
+ * Returns the simple metadata for a specific plugin request message. This
+ * supplies information necessary for the host to know how to build an
+ * MC_CMD_PLUGIN_REQ request.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
+#undef MC_CMD_0x1b1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
+/* Unique message ID to obtain */
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
+#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
+
+/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
+/* Unique message ID. This is the same value as the input parameter; it exists
+ * to allow future MCDI extensions which enumerate all messages.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
+/* Packed index number of this message, assigned by the MC to give each message
+ * a unique ID in an array to allow for more efficient storage/management.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
+/* Short human-readable codename for this message. This is conventionally
+ * formatted as a C identifier in the basic ASCII character set with any spare
+ * bytes at the end set to 0, however this convention is not enforced by the MC
+ * so consumers must check for all potential malformations before using it for
+ * a trusted purpose.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
+/* Number of bytes of data which must be passed from the host kernel to the MC
+ * for this message's payload, and which are passed back again in the response.
+ * The MC's plugin metadata loader will have validated that the number of bytes
+ * specified here will fit into MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
+ * message.
+ */
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
+#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
+
+/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
+ * an individual extension.
+ */
+#define PLUGIN_EXTENSION_LEN 20
+#define PLUGIN_EXTENSION_UUID_OFST 0
+#define PLUGIN_EXTENSION_UUID_LEN 16
+#define PLUGIN_EXTENSION_UUID_LBN 0
+#define PLUGIN_EXTENSION_UUID_WIDTH 128
+#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
+#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
+#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
+#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
+#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
+#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
+#define PLUGIN_EXTENSION_RESERVED_LBN 137
+#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_GET_ALL
+ * Returns a list of all plugin extensions currently loaded and available. The
+ * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
+ * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
+ * ADMIN_GROUP field collects how extensions are grouped in to units which are
+ * loaded/unloaded together; extensions with the same value are in the same
+ * group.
+ */
+#define MC_CMD_PLUGIN_GET_ALL 0x1b2
+#undef MC_CMD_0x1b2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
+#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
+/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
+ * nor FLAG_INCLUDE_DISABLED are specified then the result set will be empty.
+ */
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
+#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
+
+/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
+/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
+ * structs.
+ */
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
+#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_REQ
+ * Send a command to a plugin. A plugin may define an arbitrary number of
+ * 'messages' which it allows applications on the host system to send, each
+ * identified by a 32-bit ID.
+ */
+#define MC_CMD_PLUGIN_REQ 0x1b3
+#undef MC_CMD_0x1b3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PLUGIN_REQ_IN msgrequest */
+#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
+#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
+#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
+#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
+/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
+#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
+#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
+/* Message ID defined by the plugin author */
+#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
+#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
+/* Data blob being the parameter to the message. This must be of the length
+ * specified by MC_CMD_PLUGIN_GET_META_MSG_IN_MCDI_PARAM_SIZE.
+ */
+#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
+#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
+#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
+#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
+#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
+#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
+/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
+ * the same size as the input DATA parameter.
+ */
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
+#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
+
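+/* Illustrative sketch, not part of the MCDI protocol definitions: the DATA
+ * payload of an MC_CMD_PLUGIN_REQ must be exactly the DATA_SIZE reported by
+ * MC_CMD_PLUGIN_GET_META_MSG for the chosen message ID. Assuming <stdint.h>
+ * and <string.h>, a little-endian host (real code would convert byte order
+ * explicitly) and a buffer of MC_CMD_PLUGIN_REQ_IN_LEN(data_size) bytes, the
+ * request body could be assembled like this:
+ *
+ *   size_t build_plugin_req(uint8_t *inbuf, uint32_t handle, uint32_t id,
+ *                           const void *data, uint32_t data_size)
+ *   {
+ *           memcpy(inbuf + MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST, &handle, 4);
+ *           memcpy(inbuf + MC_CMD_PLUGIN_REQ_IN_ID_OFST, &id, 4);
+ *           memcpy(inbuf + MC_CMD_PLUGIN_REQ_IN_DATA_OFST, data, data_size);
+ *           return MC_CMD_PLUGIN_REQ_IN_LEN(data_size);   // total length
+ *   }
+ */
+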
+/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
+ * space that maps to a contiguous region of TRGT_ADDR space. Addresses
+ * DESC_ADDR in the range [DESC_ADDR_BASE:DESC_ADDR_BASE + 1 <<
+ * WINDOW_SIZE_LOG2) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
+ * TRGT_ADDR_BASE.
+ */
+#define DESC_ADDR_REGION_LEN 32
+/* The start of the region in DESC_ADDR space. */
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
+#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
+/* The start of the region in TRGT_ADDR space. Drivers can set this via
+ * MC_CMD_SET_DESC_ADDR_REGIONS.
+ */
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
+#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
+/* The size of the region. */
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
+#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
+/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
+ * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
+ */
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
+#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_OFST 24
+#define DESC_ADDR_REGION_RSVD_LEN 8
+#define DESC_ADDR_REGION_RSVD_LO_OFST 24
+#define DESC_ADDR_REGION_RSVD_LO_LEN 4
+#define DESC_ADDR_REGION_RSVD_LO_LBN 192
+#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_HI_OFST 28
+#define DESC_ADDR_REGION_RSVD_HI_LEN 4
+#define DESC_ADDR_REGION_RSVD_HI_LBN 224
+#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
+#define DESC_ADDR_REGION_RSVD_LBN 192
+#define DESC_ADDR_REGION_RSVD_WIDTH 64
+
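+/* Illustrative sketch, not part of the MCDI protocol definitions: the mapping
+ * described above can be applied in plain C as follows, assuming <stdint.h>
+ * and <stdbool.h>, with 'struct desc_addr_region' as a caller-side holder for
+ * the three decoded fields:
+ *
+ *   struct desc_addr_region {
+ *           uint64_t desc_addr_base;
+ *           uint64_t trgt_addr_base;
+ *           uint32_t window_size_log2;
+ *   };
+ *
+ *   // Translate desc_addr; returns false if it lies outside this region.
+ *   bool desc_to_trgt(const struct desc_addr_region *region,
+ *                     uint64_t desc_addr, uint64_t *trgt_addr)
+ *   {
+ *           uint64_t window = 1ULL << region->window_size_log2;
+ *
+ *           if (desc_addr < region->desc_addr_base ||
+ *               desc_addr - region->desc_addr_base >= window)
+ *                   return false;
+ *           *trgt_addr = desc_addr - region->desc_addr_base +
+ *                        region->trgt_addr_base;
+ *           return true;
+ *   }
+ */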
+
+/***********************************/
+/* MC_CMD_GET_DESC_ADDR_INFO
+ * Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the
+ * calling function's address space.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO 0x1b7
+#undef MC_CMD_0x1b7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_DESC_ADDR_INFO_IN msgrequest */
+#define MC_CMD_GET_DESC_ADDR_INFO_IN_LEN 0
+
+/* MC_CMD_GET_DESC_ADDR_INFO_OUT msgresponse */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN 4
+/* The type of mapping; see SF-nnnnnn-xx (EF100 driver writer's guide, once
+ * written) for details of each type.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE_OFST 0
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE_LEN 4
+/* enum: TRGT_ADDR = DESC_ADDR */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT 0x0
+/* enum: DESC_ADDR has one or more regions that map into TRGT_ADDR. The base
+ * TRGT_ADDR for each region is programmable via MCDI.
+ */
+#define MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_REGIONED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_DESC_ADDR_REGIONS
+ * Returns a list of the DESC_ADDR regions for the calling function's address
+ * space. Only valid if that function's address space has the REGIONED mapping
+ * from DESC_ADDR to TRGT_ADDR.
+ */
+#define MC_CMD_GET_DESC_ADDR_REGIONS 0x1b8
+#undef MC_CMD_0x1b8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_DESC_ADDR_REGIONS_IN msgrequest */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN 0
+
+/* MC_CMD_GET_DESC_ADDR_REGIONS_OUT msgresponse */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMIN 32
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX 224
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2 992
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LEN(num) (0+32*(num))
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM(len) (((len)-0)/32)
+/* An array of DESC_ADDR_REGION structures. The number of entries in the array
+ * indicates the number of available regions.
+ */
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_OFST 0
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_LEN 32
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MINNUM 1
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM 7
+#define MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM_MCDI2 31
+
+
+/***********************************/
+/* MC_CMD_SET_DESC_ADDR_REGIONS
+ * Set the base TRGT_ADDR for a set of DESC_ADDR regions for the calling
+ * function's address space. Only valid if that function's address space has
+ * the REGIONED mapping from DESC_ADDR to TRGT_ADDR.
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS 0x1b9
+#undef MC_CMD_0x1b9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1b9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_DESC_ADDR_REGIONS_IN msgrequest */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMIN 16
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX 248
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX_MCDI2 1016
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(num) (8+8*(num))
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_NUM(len) (((len)-8)/8)
+/* A bitmask indicating which regions should have their base TRGT_ADDR updated.
+ * To update the base TRGT_ADDR for a DESC_ADDR region, the corresponding bit
+ * should be set to 1.
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_OFST 0
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK_LEN 4
+/* Reserved field; must be set to zero. */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_RSVD_OFST 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_RSVD_LEN 4
+/* An array of values used to update the base TRGT_ADDR for DESC_ADDR regions.
+ * Array indices correspond to region numbers (i.e. the array is sparse, and
+ * includes entries for regions even if the corresponding SET_REGION_MASK bit
+ * is zero).
+ */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_OFST 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_OFST 8
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_LEN 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_LBN 64
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LO_WIDTH 32
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_OFST 12
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_LEN 4
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_LBN 96
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_HI_WIDTH 32
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MINNUM 1
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM 30
+#define MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM_MCDI2 126
+
+/* MC_CMD_SET_DESC_ADDR_REGIONS_OUT msgresponse */
+#define MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN 0
+
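+/* Illustrative sketch, not part of the MCDI protocol definitions: because
+ * TRGT_ADDR_BASE is indexed by region number, an entry must be present for
+ * every region up to the highest one being changed, even where the
+ * SET_REGION_MASK bit is clear. Assuming <stdint.h> and <stdbool.h>, the two
+ * request fields could be prepared like this before byte-order conversion and
+ * copying into the MCDI buffer:
+ *
+ *   // new_bases[i] is only meaningful where update[i] is true; nregions is
+ *   // the count reported by MC_CMD_GET_DESC_ADDR_REGIONS.
+ *   uint32_t build_set_regions(const bool *update, const uint64_t *new_bases,
+ *                              unsigned int nregions, uint64_t *trgt_bases)
+ *   {
+ *           uint32_t mask = 0;
+ *           unsigned int i;
+ *
+ *           for (i = 0; i < nregions; i++) {
+ *                   trgt_bases[i] = update[i] ? new_bases[i] : 0;
+ *                   if (update[i])
+ *                           mask |= 1u << i;
+ *           }
+ *           return mask;   // value for SET_REGION_MASK
+ *   }
+ */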
+
+/***********************************/
+/* MC_CMD_CLIENT_CMD
+ * Execute an arbitrary MCDI command on behalf of a different client. The
+ * consequences of the command (e.g. ownership of any resources created) apply
+ * to the indicated client rather than the function client which actually sent
+ * this command. All inherent permission checks are also performed on the
+ * indicated client. The given client must be a descendant of the requestor.
+ * The command to be proxied follows immediately afterward in the host buffer
+ * (or on the UART). Chaining multiple MC_CMD_CLIENT_CMD is unnecessary and not
+ * supported. New dynamic clients may be created with MC_CMD_CLIENT_ALLOC.
+ */
+#define MC_CMD_CLIENT_CMD 0x1ba
+#undef MC_CMD_0x1ba_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_CLIENT_CMD_IN msgrequest */
+#define MC_CMD_CLIENT_CMD_IN_LEN 4
+/* The client as which to execute the following command. */
+#define MC_CMD_CLIENT_CMD_IN_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_CMD_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_CLIENT_CMD_OUT msgresponse */
+#define MC_CMD_CLIENT_CMD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CLIENT_ALLOC
+ * Create a new client object. Clients are a system for delineating NIC
+ * resource ownership, such that groups of resources may be torn down as a
+ * unit. See also MC_CMD_CLIENT_CMD. See XN-200265-TC for background, concepts
+ * and a glossary. Clients created by this command are known as "dynamic
+ * clients". The newly-created client is a child of the client which sent this
+ * command. The caller must have the GRP_ALLOC_CLIENT privilege. The new client
+ * initially has no permission to do anything; see
+ * MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY.
+ */
+#define MC_CMD_CLIENT_ALLOC 0x1bb
+#undef MC_CMD_0x1bb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bb_PRIVILEGE_CTG SRIOV_CTG_ALLOC_CLIENT
+
+/* MC_CMD_CLIENT_ALLOC_IN msgrequest */
+#define MC_CMD_CLIENT_ALLOC_IN_LEN 0
+
+/* MC_CMD_CLIENT_ALLOC_OUT msgresponse */
+#define MC_CMD_CLIENT_ALLOC_OUT_LEN 4
+/* The ID of the new client object which has been created. */
+#define MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_ALLOC_OUT_CLIENT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CLIENT_FREE
+ * Destroy and release an existing client object. All resources owned by that
+ * client (including its child clients, and thus all resources owned by the
+ * entire family tree) are freed.
+ */
+#define MC_CMD_CLIENT_FREE 0x1bc
+#undef MC_CMD_0x1bc_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_CLIENT_FREE_IN msgrequest */
+#define MC_CMD_CLIENT_FREE_IN_LEN 4
+/* The ID of the client to be freed. This client must be a descendant of the
+ * requestor. A client cannot free itself.
+ */
+#define MC_CMD_CLIENT_FREE_IN_CLIENT_ID_OFST 0
+#define MC_CMD_CLIENT_FREE_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_CLIENT_FREE_OUT msgresponse */
+#define MC_CMD_CLIENT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_USER
+ * Assign partial rights over this VI to another client. VIs have an 'owner'
+ * and a 'user'. The owner is the client which allocated the VI
+ * (MC_CMD_ALLOC_VIS) and cannot be changed. The user is the client which has
+ * permission to create queues and other resources on that VI. Initially
+ * user==owner, but the user can be changed by this command; the resources thus
+ * created are then owned by the user-client. Only the VI owner can call this
+ * command, and the request will fail if there are any outstanding child
+ * resources (e.g. queues) currently allocated from this VI.
+ */
+#define MC_CMD_SET_VI_USER 0x1be
+#undef MC_CMD_0x1be_PRIVILEGE_CTG
+
+#define MC_CMD_0x1be_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_USER_IN msgrequest */
+#define MC_CMD_SET_VI_USER_IN_LEN 8
+/* Function-relative VI number to modify. */
+#define MC_CMD_SET_VI_USER_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_USER_IN_INSTANCE_LEN 4
+/* Client ID to become the new user. This must be a descendant of the owning
+ * client, the owning client itself, or the special value MC_CMD_CLIENT_ID_SELF
+ * which is synonymous with the owning client.
+ */
+#define MC_CMD_SET_VI_USER_IN_CLIENT_ID_OFST 4
+#define MC_CMD_SET_VI_USER_IN_CLIENT_ID_LEN 4
+
+/* MC_CMD_SET_VI_USER_OUT msgresponse */
+#define MC_CMD_SET_VI_USER_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES
+ * A device reports a set of MAC addresses for each client to use, known as the
+ * "permanent MAC addresses". Those MAC addresses are provided by the client's
+ * administrator, e.g. via MC_CMD_SET_CLIENT_MAC_ADDRESSES, and are intended as
+ * a hint as to which MAC address its administrator would like that client to
+ * use to identify itself. This API exists solely to allow communication of
+ * MAC addresses from administrator to administered client, and has no
+ * inherent interaction with switching within the device. There is no
+ * guarantee that a client will be able to send traffic with a source MAC
+ * address taken from the list of MAC addresses reported, nor is there a
+ * guarantee that a client will be able to receive traffic with a destination
+ * MAC taken from the list of MAC addresses. Likewise, there is no guarantee
+ * that a client will not be able to
+ * use a MAC address not present in the list. Restrictions on switching are
+ * controlled either through the EVB API if operating in EVB mode, or via MAE
+ * rules if host software is directly managing the MAE. In order to allow
+ * tenants to use this API whilst a provider is using the EVB API, the MAC
+ * addresses reported by MC_CMD_GET_CLIENT_MAC_ADDRESSES will be augmented with
+ * any MAC addresses associated with the vPort assigned to the caller. In order
+ * to allow tenants to use the EVB API whilst a provider is using this API, if
+ * a client queries the MAC addresses for a vPort using the host_evb_port_id
+ * EVB_PORT_ASSIGNED, that list of MAC addresses will be augmented with the MAC
+ * addresses assigned to the calling client. This query can either be explicit
+ * (i.e. MC_CMD_VPORT_GET_MAC_ADDRESSES) or implicit (e.g. creation of a
+ * vAdaptor with a NULL/automatic MAC address). Changing the MAC address on a
+ * vAdaptor only affects VNIC steering filters; it has no effect on the MAC
+ * addresses assigned to the vAdaptor's owner. VirtIO clients behave as EVB
+ * clients. On VirtIO device reset, a vAdaptor is created with an automatic MAC
+ * address. Querying the VirtIO device's MAC address queries the underlying
+ * vAdaptor's MAC address. Setting the VirtIO device's MAC address sets the
+ * underlying vAdaptor's MAC addresses.
+ */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES 0x1c4
+#undef MC_CMD_0x1c4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN 4
+/* A handle for the client for whom MAC address should be obtained. Use
+ * CLIENT_HANDLE_SELF to obtain the MAC addresses assigned to the calling
+ * client.
+ */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_LEN 4
+
+/* MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMIN 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX 252
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(num) (0+6*(num))
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_NUM(len) (((len)-0)/6)
+/* An array of MAC addresses assigned to the client. */
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_OFST 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_LEN 6
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MINNUM 0
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MAXNUM 42
+#define MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_MAXNUM_MCDI2 170
+
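+/* Illustrative sketch, not part of the MCDI protocol definitions: the
+ * response is a bare array of 6-byte MAC addresses, so the count comes from
+ * the response length via the _NUM helper above. Assuming <stdint.h> and
+ * <stddef.h>, with outbuf/outlen being the raw response bytes and length:
+ *
+ *   void for_each_client_mac(const uint8_t *outbuf, size_t outlen,
+ *                            void (*cb)(const uint8_t *mac))
+ *   {
+ *           unsigned int i;
+ *           unsigned int n =
+ *                   MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_NUM(outlen);
+ *
+ *           for (i = 0; i < n; i++)
+ *                   cb(outbuf +
+ *                      MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_OFST +
+ *                      i * MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS_LEN);
+ *   }
+ */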
+
+/***********************************/
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES
+ * Set the permanent MAC addresses for a client. The caller must be an
+ * administrator of the target client. See MC_CMD_GET_CLIENT_MAC_ADDRESSES for
+ * additional detail.
+ */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES 0x1c5
+#undef MC_CMD_0x1c5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMIN 4
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMAX 250
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LENMAX_MCDI2 1018
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(num) (4+6*(num))
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_NUM(len) (((len)-4)/6)
+/* A handle for the client for whom MAC addresses should be set */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_OFST 0
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE_LEN 4
+/* An array of MAC addresses to assign to the client. */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_OFST 4
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_LEN 6
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MINNUM 0
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MAXNUM 41
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS_MAXNUM_MCDI2 169
+
+/* MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_ATTR
+ * Retrieve physical build-level board attributes as configured at
+ * manufacturing stage. Fields originate from EEPROM and per-platform constants
+ * in firmware. Fields are used in development to identify/differentiate
+ * boards based on build levels/parameters, and also in manufacturing to
+ * cross-check that "what was programmed in manufacturing" is the same as
+ * "what firmware thinks has been programmed", as there are two layers of
+ * translation within firmware before the attributes reach this MCDI handler.
+ * Some parameters are retrieved as part of other commands and therefore not
+ * replicated here. See GET_VERSION_OUT.
+ */
+#define MC_CMD_GET_BOARD_ATTR 0x1c6
+#undef MC_CMD_0x1c6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
+#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
+#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
+/* Defines board capabilities and validity of attributes returned in this
+ * response-message.
+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
+#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
+#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
+#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
+/* enum: The FPGA voltage on the adapter can be set to low */
+#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
+/* enum: The FPGA voltage on the adapter can be set to regular */
+#define MC_CMD_FPGA_VOLTAGE_REG 0x1
+/* enum: The FPGA voltage on the adapter can be set to high */
+#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
+/* An array of cage types on the board */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
+/* enum: The cages are not known */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
+/* enum: The cages are SFP/SFP+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
+/* enum: The cages are QSFP/QSFP+ */
+#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
+
+
+/***********************************/
+/* MC_CMD_GET_SOC_STATE
+ * Retrieve current state of the System-on-Chip. This command is valid when
+ * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
+ */
+#define MC_CMD_GET_SOC_STATE 0x1c7
+#undef MC_CMD_0x1c7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SOC_STATE_IN msgrequest */
+#define MC_CMD_GET_SOC_STATE_IN_LEN 0
+
+/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
+#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
+/* Status flags for the SoC */
+#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
+#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
+#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
+#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
+/* Status fields for the SoC */
+#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
+#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
+#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
+/* enum: Power on (set by SUC on power up) */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
+/* enum: Running bootloader */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
+/* enum: Bootloader has started OS. OS is booting */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
+/* enum: OS is running */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
+/* enum: Maintenance OS is running */
+#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
+/* Number of SoC resets since power on */
+#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
+#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_CHECK_SCHEDULER_CREDITS
+ * For debugging purposes. For each source and destination node in the hardware
+ * schedulers, check whether the number of credits is as it should be. This
+ * should only be used when the NIC is idle, because collection is not atomic
+ * and because the expected credit counts are only meaningful when no traffic
+ * is flowing.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS 0x1c8
+#undef MC_CMD_0x1c8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_IN msgrequest */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN 8
+/* Flags for the request */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_FLAGS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_FLAGS_LEN 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_LBN 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_REPORT_ALL_WIDTH 1
+/* If there are too many results to fit into an MCDI response, they're split
+ * into pages. This field specifies which (0-indexed) page to request. A
+ * request with PAGE=0 will snapshot the results, and subsequent requests with
+ * PAGE>0 will return data from the most recent snapshot. The GENERATION field
+ * in the response allows callers to verify that all responses correspond to
+ * the same snapshot.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_LEN 4
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_OUT msgresponse */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMIN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX 240
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX_MCDI2 1008
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LEN(num) (16+16*(num))
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_NUM(len) (((len)-16)/16)
+/* The total number of results (across all pages). */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_LEN 4
+/* The number of pages that the response is split across. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_LEN 4
+/* The number of results in this response. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_OFST 8
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_LEN 4
+/* Result generation count. Incremented any time a request is made with PAGE=0.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST 12
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_LEN 4
+/* The results, as an array of SCHED_CREDIT_CHECK_RESULT structures. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_OFST 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_LEN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MINNUM 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM 14
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM_MCDI2 62
+
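+/* Illustrative sketch, not part of the MCDI protocol definitions: a caller
+ * collecting every page might loop as below, restarting if GENERATION changes
+ * between pages. issue_check_credits() and handle_page() are stand-ins for
+ * the caller's MCDI transport and result handling, and get_dword() for a
+ * 32-bit little-endian field load; none of them are real driver APIs.
+ *
+ *   void collect_credit_checks(void)
+ *   {
+ *           uint8_t outbuf[1024];
+ *           size_t outlen;
+ *           uint32_t generation, num_pages, page;
+ *
+ *   restart:
+ *           issue_check_credits(0, outbuf, &outlen);     // PAGE=0 snapshots
+ *           generation = get_dword(outbuf,
+ *                   MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST);
+ *           num_pages = get_dword(outbuf,
+ *                   MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST);
+ *           handle_page(outbuf, outlen);
+ *           for (page = 1; page < num_pages; page++) {
+ *                   issue_check_credits(page, outbuf, &outlen);
+ *                   if (get_dword(outbuf,
+ *                       MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST)
+ *                       != generation)
+ *                           goto restart;      // a new snapshot was taken
+ *                   handle_page(outbuf, outlen);
+ *           }
+ *   }
+ */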
+
+/***********************************/
+/* MC_CMD_TXQ_STATS
+ * Query per-TXQ statistics.
+ */
+#define MC_CMD_TXQ_STATS 0x1d5
+#undef MC_CMD_0x1d5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TXQ_STATS_IN msgrequest */
+#define MC_CMD_TXQ_STATS_IN_LEN 8
+/* Instance of TXQ to retrieve statistics for */
+#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
+#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
+/* Flags for the request */
+#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
+#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
+#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
+#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
+#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
+
+/* MC_CMD_TXQ_STATS_OUT msgresponse */
+#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
+#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
+#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
+#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
+#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
+
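+/* Illustrative sketch, not part of the MCDI protocol definitions: each
+ * statistic is a 64-bit value split across LO/HI dwords. Assuming <stdint.h>
+ * and <string.h> and a little-endian host, one entry of the response can be
+ * read back like this (index must be below
+ * MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(outlen)):
+ *
+ *   uint64_t txq_stat(const uint8_t *outbuf, unsigned int index)
+ *   {
+ *           uint32_t lo, hi;
+ *           const uint8_t *entry = outbuf +
+ *                   MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST +
+ *                   index * MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN;
+ *
+ *           memcpy(&lo, entry, 4);
+ *           memcpy(&hi, entry + 4, 4);
+ *           return (uint64_t)hi << 32 | lo;
+ *   }
+ */
+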
/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
* defined in SF-120734-TC with more information in SF-122717-TC.
*/
@@ -21044,7 +25112,13 @@
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LBN 0
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LBN 32
+#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_WIDTH 32
/***********************************/
@@ -21075,13 +25149,50 @@
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LBN 64
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LBN 96
+#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_WIDTH 32
/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */
#define MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0
/***********************************/
+/* MC_CMD_VIRTIO_GET_CAPABILITIES
+ * Get virtio capabilities supported by the device. Returns general virtio
+ * capabilities and limitations of the hardware / firmware implementation
+ * (hardware device as a whole), rather than those of individual configured
+ * virtio devices. At present, only the absolute maximum number of queues
+ * allowed on multi-queue devices is returned. The response is expected to be
+ * extended as necessary in the future.
+ */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
+#undef MC_CMD_0x1d3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
+/* Type of device to get capabilities for. Matches the device id as defined by
+ * the virtio spec.
+ */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+
+/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
+/* Maximum number of queues supported for a single device instance */
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
+#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
+
+
+/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -21133,17 +25244,35 @@
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LBN 128
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LBN 160
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_WIDTH 32
/* Address of the available ring in the virtqueue. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LBN 192
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LBN 224
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_WIDTH 32
/* Address of the used ring in the virtqueue. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LBN 256
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LBN 288
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_WIDTH 32
/* PASID to use on PCIe transactions involving this queue. Ignored if the
* USE_PASID flag is not set.
*/
@@ -21167,21 +25296,35 @@
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LBN 384
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_WIDTH 32
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LBN 416
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_WIDTH 32
/* Enum values, see field(s): */
/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */
-/* The inital producer index for this queue's used ring. If this queue is being
- * created to be migrated into, this should be the FINAL_PIDX value returned by
- * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. Otherwise, it
+/* The initial available index for this virtqueue. If this queue is being
+ * created to be migrated into, this should be the FINAL_AVAIL_IDX value
+ * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or
+ * equivalent if the original queue was on a third-party device). Otherwise, it
* should be zero.
*/
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST 56
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_LEN 4
+/* Alias of INITIAL_AVAIL_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4
-/* The inital consumer index for this queue's available ring. If this queue is
- * being created to be migrated into, this should be the FINAL_CIDX value
- * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from.
- * Otherwise, it should be zero.
- */
+/* The initial used index for this virtqueue. If this queue is being created to
+ * be migrated into, this should be the FINAL_USED_IDX value returned by
+ * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or equivalent if
+ * the original queue was on a third-party device). Otherwise, it should be
+ * zero.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST 60
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_LEN 4
+/* Alias of INITIAL_USED_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60
#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4
/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated
@@ -21226,10 +25369,16 @@
/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8
-/* The producer index of the used ring when the queue was stopped. */
+/* The available index of the virtqueue when the queue was stopped. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST 0
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_LEN 4
+/* Alias of FINAL_AVAIL_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4
-/* The consumer index of the available ring when the queue was stopped. */
+/* The used index of the virtqueue when the queue was stopped. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST 4
+#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_LEN 4
+/* Alias of FINAL_USED_IDX, kept for compatibility. */
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4
#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4
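/*
 * Illustrative sketch, not part of the MCDI definitions above: carrying the
 * ring indices across a queue migration.  The FINAL_*_IDX words returned by
 * MC_CMD_VIRTIO_FINI_QUEUE on the source become the INITIAL_*_IDX words of
 * the MC_CMD_VIRTIO_INIT_QUEUE request on the destination.  The buffer
 * layout follows the _OFST defines; the little-endian helpers below are
 * local assumptions, not driver API.
 */
#include <stdint.h>

static inline void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;		/* MCDI words are little-endian */
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

static inline uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
	return buf[ofst] | (buf[ofst + 1] << 8) | (buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static void virtio_carry_indices(const uint8_t *fini_resp, uint8_t *init_req)
{
	mcdi_put_dword(init_req,
		       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST,
		       mcdi_get_dword(fini_resp,
				MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST));
	mcdi_put_dword(init_req,
		       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST,
		       mcdi_get_dword(fini_resp,
				MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST));
}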
@@ -21309,16 +25458,40 @@
#define PCIE_FUNCTION_VF_NULL 0xffff
#define PCIE_FUNCTION_VF_LBN 16
#define PCIE_FUNCTION_VF_WIDTH 16
-/* PCIe interface of the function */
+/* PCIe interface of the function. Values should be taken from the
+ * PCIE_INTERFACE enum
+ */
#define PCIE_FUNCTION_INTF_OFST 4
#define PCIE_FUNCTION_INTF_LEN 4
-/* enum: Host PCIe interface */
+/* enum: Host PCIe interface. (Alias for HOST_PRIMARY, provided for backwards
+ * compatibility)
+ */
#define PCIE_FUNCTION_INTF_HOST 0x0
-/* enum: Application Processor interface */
+/* enum: Application Processor interface (alias for NIC_EMBEDDED, provided for
+ * backwards compatibility)
+ */
#define PCIE_FUNCTION_INTF_AP 0x1
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
+/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
+ * (absolute VI number + VI relative queue number). On Keystone, a VI can
+ * contain multiple queues (at present, up to 2), each with separate controls
+ * for direction. This structure is required to uniquely identify the absolute
+ * source queue for descriptor proxy functions.
+ */
+#define QUEUE_ID_LEN 4
+/* Absolute VI number */
+#define QUEUE_ID_ABS_VI_OFST 0
+#define QUEUE_ID_ABS_VI_LEN 2
+#define QUEUE_ID_ABS_VI_LBN 0
+#define QUEUE_ID_ABS_VI_WIDTH 16
+/* Relative queue number within the VI */
+#define QUEUE_ID_REL_QUEUE_LBN 16
+#define QUEUE_ID_REL_QUEUE_WIDTH 1
+#define QUEUE_ID_RESERVED_LBN 17
+#define QUEUE_ID_RESERVED_WIDTH 15
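/*
 * Illustrative sketch, not part of the MCDI definitions above: packing a
 * QUEUE_ID word from the LBN/WIDTH defines.  ABS_VI occupies bits 0..15 and
 * REL_QUEUE bit 16; the reserved bits are left zero.
 */
#include <stdint.h>

static inline uint32_t queue_id_pack(uint16_t abs_vi, unsigned int rel_queue)
{
	return ((uint32_t)abs_vi << QUEUE_ID_ABS_VI_LBN) |
	       ((rel_queue & 1u) << QUEUE_ID_REL_QUEUE_LBN);
}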
+
/***********************************/
/* MC_CMD_DESC_PROXY_FUNC_CREATE
@@ -21347,7 +25520,19 @@
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
/* The personality to set. The meanings of the personalities are defined in
* SF-120734-TC with more information in SF-122717-TC. At present, we only
* support proxying for VIRTIO_BLK
@@ -21371,7 +25556,19 @@
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
/***********************************/
@@ -21412,7 +25609,13 @@
#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
+#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
+#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
@@ -21485,7 +25688,13 @@
#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
+#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
+#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
@@ -21720,7 +25929,19 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
/* Function personality */
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
@@ -21733,6 +25954,10 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
/* enum: Function configuration is pending reset */
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
+/* enum: Function configuration is missing (created, but no configuration
+ * committed)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
/* Generation count to be delivered in an event once the configuration becomes
* live (if status is "pending")
*/
@@ -21742,7 +25967,7 @@
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG
+ * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
*/
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
@@ -21780,9 +26005,27 @@
#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
+#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
+#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
+#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
+#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
+#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
/* Function personality */
#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
@@ -21840,7 +26083,11 @@
* Enable descriptor proxying for function into target event queue. Returns VI
* allocation info for the proxy source function, so that the caller can map
* absolute VI IDs from descriptor proxy events back to the originating
- * function.
+ * function. This is a legacy function that only supports single queue proxy
+ * devices. It is also limited in that it can only be called after host driver
+ * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
+ * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
+ * supports multi-queue devices and has no dependency on host driver attach.
*/
#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
#undef MC_CMD_0x178_PRIVILEGE_CTG
@@ -21871,8 +26118,44 @@
/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
+ * Enable descriptor proxying for a source queue on a host function into a
+ * target event queue. The source queue number is a relative virtqueue number
+ * on the source function (0 to max_virtqueues-1). For a multi-queue device,
+ * the caller must enable all source queues individually. To retrieve absolute
+ * VI information for the source function (so that VI IDs from descriptor
+ * proxy events can be mapped back to the source function / queue), see
+ * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
+#undef MC_CMD_0x1d0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
+/* Source relative queue number to enable proxying on */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
+/* Descriptor proxy sink queue (caller function relative). Must be extended
+ * width event queue
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
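/*
 * Illustrative sketch, not part of the MCDI definitions above: a multi-queue
 * proxy device must have each source queue enabled individually.  mcdi_rpc()
 * is a hypothetical transport helper (declared here only as a prototype);
 * mcdi_put_dword() is from the earlier sketch.
 */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, unsigned int inlen,
		    uint8_t *outbuf, unsigned int outlen);	/* hypothetical */

static int proxy_enable_all_queues(uint32_t handle, uint32_t target_evq,
				   unsigned int max_virtqueues)
{
	uint8_t req[MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN];
	unsigned int q;
	int rc;

	for (q = 0; q < max_virtqueues; q++) {
		mcdi_put_dword(req,
			MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST,
			handle);
		mcdi_put_dword(req,
			MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST,
			q);
		mcdi_put_dword(req,
			MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST,
			target_evq);
		rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE,
			      req, sizeof(req), NULL, 0);
		if (rc)
			return rc;
	}
	return 0;
}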
+
+
+/***********************************/
/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function
+ * Disable descriptor proxying for function. For multi-queue functions,
+ * disables all queues.
*/
#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
#undef MC_CMD_0x179_PRIVILEGE_CTG
@@ -21892,6 +26175,75 @@
/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
+ * Disable descriptor proxying for a specific source queue on a function.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
+#undef MC_CMD_0x1d1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
+/* Source relative queue number to disable proxying on */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_GET_VI_INFO
+ * Returns absolute VI allocation information for the descriptor proxy source
+ * function referenced by HANDLE, so that the caller can map absolute VI IDs
+ * from descriptor proxy events back to the originating function and queue. The
+ * call is only valid after the host driver for the source function has
+ * attached (after receiving a driver attach event for the descriptor proxy
+ * function) and will fail with ENOTCONN otherwise.
+ */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
+#undef MC_CMD_0x1d2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
+/* VI information (VI ID + VI relative queue number) for each of the source
+ * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID
+ * structures.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
+#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
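/*
 * Illustrative sketch, not part of the MCDI definitions above: looking up the
 * absolute VI for a given source queue in the VI_MAP array returned by
 * MC_CMD_DESC_PROXY_GET_VI_INFO.  Entry i is a QUEUE_ID word describing
 * source queue i.  mcdi_get_dword() is from the earlier sketch.
 */
static int vi_for_queue(const uint8_t *resp, unsigned int resp_len,
			unsigned int queue, uint16_t *abs_vi)
{
	uint32_t entry;

	if (queue >= MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(resp_len))
		return -1;
	entry = mcdi_get_dword(resp,
		MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST +
		queue * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN);
	*abs_vi = entry & 0xffff;	/* QUEUE_ID.ABS_VI */
	return (entry >> 16) & 1;	/* QUEUE_ID.REL_QUEUE */
}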
+
+
+/***********************************/
/* MC_CMD_GET_ADDR_SPC_ID
* Get Address space identifier for use in mem2mem descriptors for a given
* target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
@@ -21942,7 +26294,19 @@
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
+#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
@@ -21962,7 +26326,3381 @@
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
+#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
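/*
 * Illustrative sketch, not part of the MCDI definitions above: 64-bit MCDI
 * fields such as ADDR_SPC_ID are split into _LO/_HI dwords at consecutive
 * offsets and reassembled as below.  mcdi_get_dword() is from the earlier
 * sketch.
 */
static inline uint64_t get_addr_spc_id(const uint8_t *resp)
{
	uint64_t lo = mcdi_get_dword(resp,
			MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST);
	uint64_t hi = mcdi_get_dword(resp,
			MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST);

	return lo | (hi << 32);
}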
+
+
+/***********************************/
+/* MC_CMD_GET_CLIENT_HANDLE
+ * Obtain a handle for a client given a description of that client. N.B. this
+ * command is subject to change given the open discussion about how PCIe
+ * functions should be referenced on an iEP (integrated endpoint: functions
+ * span multiple buses) and multihost (multiple PCIe interfaces) system.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE 0x1c3
+#undef MC_CMD_0x1c3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLIENT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_LEN 12
+/* Type of client to get a client handle for */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_LEN 4
+/* enum: Obtain a client handle for a PCIe function-type client. */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC 0x0
+/* PCIe Function ID (as struct PCIE_FUNCTION). Valid when TYPE==FUNC. Use:
+ *  - INTF=CALLER, PF=PF_NULL, VF=VF_NULL to refer to the calling function
+ *  - INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF
+ *    or a sibling VF of the calling VF
+ *  - INTF=CALLER, PF=..., VF=VF_NULL to refer to a PF on the calling interface
+ *  - INTF=CALLER, PF=..., VF=... to refer to a VF on the calling interface
+ *  - INTF=..., PF=..., VF=VF_NULL to refer to a PF on a named interface
+ *  - INTF=..., PF=..., VF=... to refer to a VF on a named interface
+ * where ... refers to a small integer for the VF/PF fields, and to values
+ * from the PCIE_INTERFACE enum for the INTF field. It's only meaningful to
+ * use INTF=CALLER within a structure that's an argument to
+ * MC_CMD_DEVEL_GET_CLIENT_HANDLE.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LEN 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LBN 32
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_WIDTH 32
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_OFST 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LBN 64
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_WIDTH 32
+/* enum: NULL value for the INTF field of struct PCIE_FUNCTION. Provided for
+ * backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
+ */
+#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_LEN 2
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_OFST 8
+#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_LEN 4
+
+/* MC_CMD_GET_CLIENT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_LEN 4
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_OFST 0
+#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_LEN 4
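/*
 * Illustrative sketch, not part of the MCDI definitions above: obtaining a
 * client handle for a VF child of the calling PF (INTF=CALLER, PF=PF_NULL,
 * VF=vf_idx), per the FUNC field comment.  PCIE_FUNCTION_PF_NULL and
 * PCIE_INTERFACE_CALLER are assumed to be defined elsewhere in this header;
 * mcdi_rpc(), mcdi_put_dword() and mcdi_get_dword() are the assumed helpers
 * from the earlier sketches.
 */
static int get_vf_client_handle(uint16_t vf_idx, uint32_t *handle)
{
	uint8_t req[MC_CMD_GET_CLIENT_HANDLE_IN_LEN] = { 0 };
	uint8_t resp[MC_CMD_GET_CLIENT_HANDLE_OUT_LEN];
	int rc;

	mcdi_put_dword(req, MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_OFST,
		       MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
	/* 16-bit little-endian PF/VF subfields of the 64-bit FUNC word */
	req[MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST] = PCIE_FUNCTION_PF_NULL & 0xff;
	req[MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST + 1] = PCIE_FUNCTION_PF_NULL >> 8;
	req[MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST] = vf_idx & 0xff;
	req[MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST + 1] = vf_idx >> 8;
	mcdi_put_dword(req, MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_OFST,
		       PCIE_INTERFACE_CALLER);
	rc = mcdi_rpc(MC_CMD_GET_CLIENT_HANDLE, req, sizeof(req),
		      resp, sizeof(resp));
	if (rc)
		return rc;
	*handle = mcdi_get_dword(resp, MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_OFST);
	return 0;
}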
+
+/* MAE_FIELD_FLAGS structuredef */
+#define MAE_FIELD_FLAGS_LEN 4
+#define MAE_FIELD_FLAGS_FLAT_OFST 0
+#define MAE_FIELD_FLAGS_FLAT_LEN 4
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_OFST 0
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_LBN 0
+#define MAE_FIELD_FLAGS_SUPPORT_STATUS_WIDTH 6
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_OFST 0
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_LBN 6
+#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_WIDTH 1
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_OFST 0
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_LBN 7
+#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_WIDTH 1
+#define MAE_FIELD_FLAGS_FLAT_LBN 0
+#define MAE_FIELD_FLAGS_FLAT_WIDTH 32
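/*
 * Illustrative sketch, not part of the MCDI definitions above: unpacking the
 * bitfields of a MAE_FIELD_FLAGS word using its LBN/WIDTH defines.
 */
static inline unsigned int mae_field_support_status(uint32_t flags)
{
	return (flags >> MAE_FIELD_FLAGS_SUPPORT_STATUS_LBN) &
	       ((1u << MAE_FIELD_FLAGS_SUPPORT_STATUS_WIDTH) - 1);
}

static inline unsigned int mae_field_mask_affects_class(uint32_t flags)
{
	return (flags >> MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_LBN) & 1;
}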
+
+/* MAE_ENC_FIELD_PAIRS structuredef: Mask and value pairs for all fields that
+ * it makes sense to use to determine the encapsulation type of a packet. Its
+ * intended use is to keep a common packing of fields across multiple MCDI
+ * commands, keeping things inherently synchronised and allowing code to be
+ * shared. To use in an MCDI command, the command should end with a variable
+ * length byte array populated with this structure. Do not extend this
+ * structure. Instead,
+ * create _Vx versions with the necessary fields appended. That way, the
+ * existing semantics for extending MCDI commands are preserved.
+ */
+#define MAE_ENC_FIELD_PAIRS_LEN 156
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST 8
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LBN 64
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 10
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 80
+#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_OFST 12
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LBN 96
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 14
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 112
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_OFST 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LBN 128
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 18
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 144
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_OFST 20
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LBN 160
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 22
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 176
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_OFST 24
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LBN 192
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 26
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 208
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_OFST 28
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LBN 224
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 34
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 272
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_OFST 40
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LBN 320
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 46
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 368
+#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_OFST 52
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LBN 416
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 56
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 448
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_OFST 60
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LBN 480
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 76
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 608
+#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_OFST 92
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LBN 736
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_OFST 96
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LBN 768
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_OFST 100
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LBN 800
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_OFST 116
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LBN 928
+#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_OFST 132
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LBN 1056
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_OFST 133
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LBN 1064
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_OFST 134
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LBN 1072
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_OFST 135
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LBN 1080
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_OFST 136
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LBN 1088
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_OFST 137
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LBN 1096
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_WIDTH 8
+/* Deprecated in favour of ENC_FLAGS alias. */
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_LBN 0
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_LBN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_LBN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LBN 1104
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_WIDTH 8
+/* More generic alias for ENC_VLAN_FLAGS. */
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_OFST 138
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LBN 1104
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_WIDTH 8
+/* Deprecated in favour of ENC_FLAGS_MASK alias. */
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_LBN 0
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_LBN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_LBN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_WIDTH 1
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LBN 1112
+#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_WIDTH 8
+/* More generic alias for ENC_FLAGS_MASK. */
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_OFST 139
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LEN 1
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LBN 1112
+#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_WIDTH 8
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_OFST 140
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LBN 1120
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 144
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 1152
+#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_OFST 148
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LBN 1184
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 150
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 1200
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_OFST 152
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LBN 1216
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 154
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 1232
+#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16
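/*
 * Illustrative sketch, not part of the MCDI definitions above: each
 * MAE_ENC_FIELD_PAIRS field is stored at its _OFST with its mask at the
 * corresponding _MASK_OFST.  Here the outer EtherType is matched exactly;
 * the _BE suffix is taken to mean network (big-endian) byte order.
 */
static void enc_pairs_match_ether_type(uint8_t *pairs, uint16_t ether_type)
{
	pairs[MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST] = ether_type >> 8;
	pairs[MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST + 1] = ether_type & 0xff;
	/* all-ones mask => exact match */
	pairs[MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST] = 0xff;
	pairs[MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST + 1] = 0xff;
}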
+
+/* MAE_FIELD_MASK_VALUE_PAIRS structuredef: Mask and value pairs for all fields
+ * currently defined. Same semantics as MAE_ENC_FIELD_PAIRS.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_LEN 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_OFST 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LBN 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_OFST 12
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LBN 96
+#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_OFST 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LBN 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_OFST 18
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LBN 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_OFST 20
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LBN 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_OFST 22
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LBN 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_OFST 24
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LBN 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_OFST 26
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LBN 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_OFST 28
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LBN 224
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_OFST 30
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LBN 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_OFST 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LBN 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_OFST 34
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LBN 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_OFST 36
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LBN 288
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_OFST 42
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LBN 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_OFST 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LBN 384
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_OFST 54
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LBN 432
+#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_OFST 60
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LBN 480
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_OFST 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LBN 512
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_OFST 68
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LBN 544
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_OFST 84
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LBN 672
+#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_OFST 100
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LBN 800
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_OFST 104
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LBN 832
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_OFST 108
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LBN 864
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_OFST 124
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LBN 992
+#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_OFST 140
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LBN 1120
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_OFST 141
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LBN 1128
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_OFST 142
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LBN 1136
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_OFST 143
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LBN 1144
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_WIDTH 8
+/* Due to hardware limitations, firmware may return
+ * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value
+ * other than 1.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_OFST 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LBN 1152
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_OFST 145
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LBN 1160
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_OFST 148
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LBN 1184
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_OFST 152
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LBN 1216
+#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_OFST 156
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LBN 1248
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_OFST 158
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LBN 1264
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_OFST 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LBN 1280
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_OFST 162
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LBN 1296
+#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_OFST 164
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LBN 1312
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_OFST 166
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LBN 1328
+#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_OFST 168
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LBN 1344
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_OFST 172
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LBN 1376
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_OFST 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LBN 1408
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_OFST 180
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LBN 1440
+#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_OFST 184
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LBN 1472
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 188
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 1504
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_OFST 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LBN 1536
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 194
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 1552
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_OFST 196
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LBN 1568
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 198
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 1584
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_OFST 200
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LBN 1600
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 202
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 1616
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_OFST 204
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LBN 1632
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 206
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 1648
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_OFST 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LBN 1664
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 214
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 1712
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_OFST 220
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LBN 1760
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 226
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 1808
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_OFST 232
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LBN 1856
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 236
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 1888
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_OFST 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LBN 1920
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 2048
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_OFST 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LBN 2176
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_OFST 276
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LBN 2208
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_OFST 280
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LBN 2240
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_OFST 296
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LBN 2368
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_OFST 312
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LBN 2496
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_OFST 313
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LBN 2504
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_OFST 314
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LBN 2512
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_OFST 315
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LBN 2520
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_OFST 316
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LBN 2528
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_OFST 317
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LBN 2536
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_OFST 320
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LBN 2560
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 324
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 2592
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_OFST 328
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LBN 2624
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 330
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 2640
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_OFST 332
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LBN 2656
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 334
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 2672
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_OFST 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LBN 2688
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_OFST 340
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LBN 2720
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_WIDTH 32
+
+/* MAE_FIELD_MASK_VALUE_PAIRS_V2 structuredef */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN 372
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_OFST 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_OFST 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LBN 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_OFST 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LBN 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_OFST 12
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LBN 96
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_OFST 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LBN 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_OFST 18
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LBN 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_OFST 20
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LBN 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_OFST 22
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LBN 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_OFST 24
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LBN 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_OFST 26
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LBN 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_OFST 28
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LBN 224
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_OFST 30
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LBN 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_OFST 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LBN 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_OFST 34
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LBN 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_OFST 36
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LBN 288
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_OFST 42
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LBN 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_OFST 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LBN 384
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_OFST 54
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LBN 432
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_OFST 60
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LBN 480
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_OFST 64
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LBN 512
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_OFST 68
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LBN 544
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_OFST 84
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LBN 672
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_OFST 100
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LBN 800
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_OFST 104
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LBN 832
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_OFST 108
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LBN 864
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_OFST 124
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LBN 992
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_OFST 140
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LBN 1120
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_OFST 141
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LBN 1128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_OFST 142
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LBN 1136
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_OFST 143
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LBN 1144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_WIDTH 8
+/* Due to hardware limitations, firmware may return
+ * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value
+ * other than 1.
+ */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_OFST 144
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LBN 1152
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_OFST 145
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LBN 1160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_OFST 148
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LBN 1184
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_OFST 152
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LBN 1216
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_OFST 156
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LBN 1248
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_OFST 158
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LBN 1264
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_OFST 160
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LBN 1280
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_OFST 162
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LBN 1296
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_OFST 164
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LBN 1312
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_OFST 166
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LBN 1328
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_OFST 168
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LBN 1344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_OFST 172
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LBN 1376
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_OFST 176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LBN 1408
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_OFST 180
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LBN 1440
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_OFST 184
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LBN 1472
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_OFST 188
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LBN 1504
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_OFST 192
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LBN 1536
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_OFST 194
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LBN 1552
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_OFST 196
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LBN 1568
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_OFST 198
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LBN 1584
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_OFST 200
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LBN 1600
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_OFST 202
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LBN 1616
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_OFST 204
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LBN 1632
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_OFST 206
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LBN 1648
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_OFST 208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LBN 1664
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_OFST 214
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LBN 1712
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_OFST 220
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LBN 1760
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_OFST 226
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LEN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LBN 1808
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_WIDTH 48
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_OFST 232
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LBN 1856
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_OFST 236
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LBN 1888
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_OFST 240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LBN 1920
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_OFST 256
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LBN 2048
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_OFST 272
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LBN 2176
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_OFST 276
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LBN 2208
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_OFST 280
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LBN 2240
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_OFST 296
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LEN 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LBN 2368
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_WIDTH 128
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_OFST 312
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LBN 2496
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_OFST 313
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LBN 2504
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_OFST 314
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LBN 2512
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_OFST 315
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LBN 2520
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_OFST 316
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LBN 2528
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_OFST 317
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LBN 2536
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_OFST 320
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LBN 2560
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_OFST 324
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LBN 2592
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_OFST 328
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LBN 2624
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_OFST 330
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LBN 2640
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_OFST 332
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LBN 2656
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_OFST 334
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LBN 2672
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_OFST 336
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LBN 2688
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_OFST 340
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LBN 2720
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_LBN 0
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_LBN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_LBN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_LBN 3
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_LBN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_LBN 5
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_LBN 6
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_LBN 7
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_LBN 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_OFST 344
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_LBN 9
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_WIDTH 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LBN 2752
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_OFST 348
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LBN 2784
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_OFST 352
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LBN 2816
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_OFST 354
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LEN 2
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LBN 2832
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_WIDTH 16
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_OFST 356
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LBN 2848
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_OFST 360
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LEN 4
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LBN 2880
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_WIDTH 32
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_OFST 364
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LBN 2912
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_OFST 365
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LBN 2920
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_OFST 366
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LBN 2928
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_WIDTH 8
+/* Set to zero. */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_OFST 367
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LBN 2936
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_OFST 368
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LBN 2944
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_WIDTH 8
+/* Set to zero */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_OFST 369
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LBN 2952
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_WIDTH 8
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_OFST 370
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LBN 2960
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_WIDTH 8
+/* Set to zero */
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_OFST 371
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LEN 1
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LBN 2968
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_WIDTH 8
+
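The field/mask defines above describe a flat criteria buffer in which every match field is immediately followed by its mask, so an exact match is expressed by writing the value at <FIELD>_OFST and an all-ones mask at <FIELD>_MASK_OFST. Below is a minimal illustrative sketch (not taken from the driver) that matches exactly on IP_PROTO == 17 (UDP); it assumes 'pairs' points at a zeroed MAE_FIELD_MASK_VALUE_PAIRS_V2 criteria buffer and that the defines above are in scope.

#include <stdint.h>

/* Match exactly on IP_PROTO == 17 (UDP). All other fields stay wildcarded
 * because their masks remain zero in the zeroed criteria buffer.
 */
static void mae_match_udp(uint8_t *pairs)
{
	pairs[MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_OFST] = 17;
	pairs[MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_OFST] = 0xff;
}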
+/* MAE_MPORT_SELECTOR structuredef: MPORTs are identified by an opaque unsigned
+ * integer value (mport_id) that is guaranteed to be representable within
+ * 32 bits or within any NIC interface field that needs to store the value
+ * (whichever is narrower). This selector structure provides a stable way to
+ * refer to m-ports.
+ */
+#define MAE_MPORT_SELECTOR_LEN 4
+/* Used to force the tools to output bitfield-style defines for this structure.
+ */
+#define MAE_MPORT_SELECTOR_FLAT_OFST 0
+#define MAE_MPORT_SELECTOR_FLAT_LEN 4
+/* enum: An m-port selector value that is guaranteed never to represent a real
+ * mport
+ */
+#define MAE_MPORT_SELECTOR_NULL 0x0
+/* enum: The m-port assigned to the calling client. */
+#define MAE_MPORT_SELECTOR_ASSIGNED 0x1000000
+#define MAE_MPORT_SELECTOR_TYPE_OFST 0
+#define MAE_MPORT_SELECTOR_TYPE_LBN 24
+#define MAE_MPORT_SELECTOR_TYPE_WIDTH 8
+/* enum: The MPORT connected to a given physical port */
+#define MAE_MPORT_SELECTOR_TYPE_PPORT 0x2
+/* enum: The MPORT assigned to a given PCIe function. Deprecated in favour of
+ * MH_FUNC.
+ */
+#define MAE_MPORT_SELECTOR_TYPE_FUNC 0x3
+/* enum: An mport_id */
+#define MAE_MPORT_SELECTOR_TYPE_MPORT_ID 0x4
+/* enum: The MPORT assigned to a given PCIe function (see also FWRIVERHD-1108)
+ */
+#define MAE_MPORT_SELECTOR_TYPE_MH_FUNC 0x5
+/* enum: This is guaranteed never to be a valid selector type */
+#define MAE_MPORT_SELECTOR_TYPE_INVALID 0xff
+#define MAE_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MAE_MPORT_SELECTOR_MPORT_ID_LBN 0
+#define MAE_MPORT_SELECTOR_MPORT_ID_WIDTH 24
+#define MAE_MPORT_SELECTOR_PPORT_ID_OFST 0
+#define MAE_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MAE_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_HOST_PRIMARY 0x1 /* enum */
+#define MAE_MPORT_SELECTOR_NIC_EMBEDDED 0x2 /* enum */
+/* enum: Deprecated, use CALLER_INTF instead. */
+#define MAE_MPORT_SELECTOR_CALLER 0xf
+#define MAE_MPORT_SELECTOR_CALLER_INTF 0xf /* enum */
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_LBN 16
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_WIDTH 8
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_LBN 0
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_WIDTH 16
+/* enum: Used for VF_ID to indicate a physical function. */
+#define MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL 0xffff
+/* enum: Used for PF_ID to indicate the physical function of the calling
+ * client.
+ * - When used by a PF with VF_ID == VF_ID_NULL, the mport selector relates to
+ *   the calling function. (For clarity, it is recommended that clients use
+ *   ASSIGNED to achieve this behaviour.)
+ * - When used by a PF with VF_ID != VF_ID_NULL, the mport selector relates to
+ *   a VF child of the calling function.
+ * - When used by a VF with VF_ID == VF_ID_NULL, the mport selector relates to
+ *   the PF owning the calling function.
+ * - When used by a VF with VF_ID != VF_ID_NULL, the mport selector relates to
+ *   a sibling VF of the calling function.
+ * - Not meaningful when used by a client that is not a PCIe function.
+ */
+#define MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER 0xff
+/* enum: Same as PF_ID_CALLER, but for use in the smaller MH_PF_ID field. Only
+ * valid if FUNC_INTF_ID is CALLER.
+ */
+#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_CALLER 0xf
+#define MAE_MPORT_SELECTOR_FLAT_LBN 0
+#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
+
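Per the defines above, an MAE_MPORT_SELECTOR packs an 8-bit TYPE into bits 31:24 and a type-specific identifier into the low bits. The sketch below, relying only on the defines above, builds the FLAT word for the TYPE_MPORT_ID case; mae_mport_selector_by_id() is a hypothetical helper name, not a driver function.

#include <stdint.h>

/* Build the FLAT selector word: TYPE in bits 31:24, mport_id in bits 23:0. */
static inline uint32_t mae_mport_selector_by_id(uint32_t mport_id)
{
	uint32_t flat = (uint32_t)MAE_MPORT_SELECTOR_TYPE_MPORT_ID
			<< MAE_MPORT_SELECTOR_TYPE_LBN;

	flat |= mport_id & ((1u << MAE_MPORT_SELECTOR_MPORT_ID_WIDTH) - 1);
	return flat;
}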
+/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
+ * virtual network port by MAE port and link end
+ */
+#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
+/* The MAE MPORT of interest */
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_WIDTH 32
+/* Which end of the link identified by MPORT to consider */
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_OFST 4
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MPORT_END */
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LBN 32
+#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_WIDTH 32
+/* A field for accessing the endpoint selector as a collection of bits */
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LEN 8
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_OFST 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_WIDTH 32
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_OFST 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LEN 4
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LBN 32
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_WIDTH 32
+/* enum: Set FLAT to this value to obtain backward-compatible behaviour in
+ * commands that have been extended to take a MAE_LINK_ENDPOINT_SELECTOR
+ * argument. New commands that are designed to take such an argument from the
+ * start will not support this.
+ */
+#define MAE_LINK_ENDPOINT_SELECTOR_MAE_LINK_ENDPOINT_COMPAT 0x0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LBN 0
+#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_WIDTH 64
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_CAPS
+ * Describes capabilities of the MAE (Match-Action Engine)
+ */
+#define MC_CMD_MAE_GET_CAPS 0x140
+#undef MC_CMD_0x140_PRIVILEGE_CTG
+
+#define MC_CMD_0x140_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_GET_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_OUT_LEN 52
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response shorter than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_LEN 4
+
+/* MC_CMD_MAE_GET_CAPS_V2_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_LEN 60
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response shorter than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_LEN 4
+/* Mask of supported counter types. Each bit position corresponds to a value of
+ * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response),
+ * clients must assume that only AR counters are supported (i.e.
+ * COUNTER_TYPES_SUPPORTED==0x1). See also
+ * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK.
+ */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_OFST 52
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_LEN 4
+/* The total number of conntrack counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_OFST 56
+#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_LEN 4
+
+/* MC_CMD_MAE_GET_CAPS_V3_OUT msgresponse */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_LEN 64
+/* The number of field IDs that the NIC supports. Any field with an ID greater
+ * than or equal to the value returned in this field must be treated as having
+ * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_OFST 0
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
+/* Deprecated alias for AR_COUNTERS. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_LEN 4
+/* The total number of AR counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_OFST 8
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_LEN 4
+/* The total number of counter lists available to allocate. A value of zero
+ * indicates that counter lists are not supported by the NIC. (But single
+ * counters may still be.)
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_OFST 12
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_LEN 4
+/* The total number of encap header structures available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_OFST 16
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_LEN 4
+/* Reserved. Should be zero. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_OFST 20
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_LEN 4
+/* The total number of action sets available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_OFST 24
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_LEN 4
+/* The total number of action set lists available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_OFST 28
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_LEN 4
+/* The total number of outer rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_OFST 32
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_LEN 4
+/* The total number of action rules available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_OFST 36
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_LEN 4
+/* The number of priorities available for ACTION_RULE filters. It is invalid to
+ * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_OFST 40
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_LEN 4
+/* The number of priorities available for OUTER_RULE filters. It is invalid to
+ * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_OFST 44
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_LEN 4
+/* MAE API major version. Currently 1. If this field is not present in the
+ * response (i.e. response shorter than 384 bits), then its value is zero. If
+ * the value does not match the client's expectations, the client should raise
+ * a fatal error.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_OFST 48
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_LEN 4
+/* Mask of supported counter types. Each bit position corresponds to a value of
+ * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response),
+ * clients must assume that only AR counters are supported (i.e.
+ * COUNTER_TYPES_SUPPORTED==0x1). See also
+ * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK.
+ */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_OFST 52
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_LEN 4
+/* The total number of conntrack counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_OFST 56
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_LEN 4
+/* The total number of Outer Rule counters available to allocate. */
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_OFST 60
+#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_LEN 4
+
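MC_CMD_MAE_GET_CAPS has grown versioned responses (52, 60 and 64 bytes for V1/V2/V3), so a client should check the response length before reading the newer fields and fall back to the documented defaults when they are absent. A minimal parsing sketch follows; mcdi_read_le32() is a hypothetical helper, and the code assumes MCDI responses are little-endian byte buffers of at least V1 length.

#include <stddef.h>
#include <stdint.h>

/* Read a little-endian 32-bit field at byte offset 'ofst' of an MCDI buffer. */
static uint32_t mcdi_read_le32(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

struct mae_caps {
	uint32_t match_field_count;
	uint32_t action_prios;
	uint32_t counter_types_supported;
};

/* Parse a GET_CAPS response of at least V1 length (52 bytes). */
static void mae_parse_caps(const uint8_t *resp, size_t resp_len,
			   struct mae_caps *caps)
{
	caps->match_field_count =
		mcdi_read_le32(resp, MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_OFST);
	caps->action_prios =
		mcdi_read_le32(resp, MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_OFST);
	if (resp_len >= MC_CMD_MAE_GET_CAPS_V2_OUT_LEN)
		caps->counter_types_supported = mcdi_read_le32(resp,
			MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_OFST);
	else
		caps->counter_types_supported = 0x1;	/* V1: AR counters only */
}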
+
+/***********************************/
+/* MC_CMD_MAE_GET_AR_CAPS
+ * Get a level of support for match fields when used in match-action rules
+ */
+#define MC_CMD_MAE_GET_AR_CAPS 0x141
+#undef MC_CMD_0x141_PRIVILEGE_CTG
+
+#define MC_CMD_0x141_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_GET_AR_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_AR_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_AR_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMIN 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX 252
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4)
+/* Number of fields actually returned in FIELD_FLAGS. */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_OFST 0
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_LEN 4
+/* Array of values indicating the NIC's support for a given field, indexed by
+ * field ID. The driver must ensure space for
+ * MC_CMD_MAE_GET_CAPS.MATCH_FIELD_COUNT entries in the array.
+ */
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_LEN 4
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MINNUM 0
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62
+#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_MAE_GET_OR_CAPS
+ * Get a level of support for fields used in outer rule keys.
+ */
+#define MC_CMD_MAE_GET_OR_CAPS 0x142
+#undef MC_CMD_0x142_PRIVILEGE_CTG
+
+#define MC_CMD_0x142_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_GET_OR_CAPS_IN msgrequest */
+#define MC_CMD_MAE_GET_OR_CAPS_IN_LEN 0
+
+/* MC_CMD_MAE_GET_OR_CAPS_OUT msgresponse */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMIN 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX 252
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4)
+/* Number of fields actually returned in FIELD_FLAGS. */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_OFST 0
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_LEN 4
+/* Same semantics as MC_CMD_MAE_GET_AR_CAPS.FIELD_FLAGS */
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_OFST 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_LEN 4
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MINNUM 0
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62
+#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTER_ALLOC
+ * Allocate match-action-engine counters, which can be referenced in various
+ * tables.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC 0x143
+#undef MC_CMD_0x143_PRIVILEGE_CTG
+
+#define MC_CMD_0x143_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTER_ALLOC_IN msgrequest: Using this is equivalent to using V2
+ * with COUNTER_TYPE=AR.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_LEN 4
+/* The number of counters that the driver would like allocated */
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_LEN 4
+
+/* MC_CMD_MAE_COUNTER_ALLOC_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN 8
+/* The number of counters that the driver would like allocated */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_LEN 4
+/* Which type of counter to allocate. */
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_OFST 4
+#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_TYPE */
+
+/* MC_CMD_MAE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMIN 12
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(len) (((len)-8)/4)
+/* Generation count. Packets with generation count >= GENERATION_COUNT will
+ * contain valid counter values for counter IDs allocated in this call, unless
+ * the counter values are zero and zero squash is enabled. Note that there is
+ * an independent GENERATION_COUNT object per counter type, and that generation
+ * counts wrap from 0xffffffff to 1.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_LEN 4
+/* enum: Generation counter 0 is reserved and unused. */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_INVALID 0x0
+/* The number of counter IDs that the NIC allocated. It is never less than 1;
+ * failure to allocate even a single counter will cause an error to be
+ * returned. It is never greater than REQUESTED_COUNT, but may be less.
+ */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_OFST 4
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_LEN 4
+/* An array containing the IDs for the counters allocated. */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 8
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 61
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+
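Since MC_CMD_MAE_COUNTER_ALLOC may return fewer counter IDs than requested, a caller should read COUNTER_ID_COUNT before walking the array. The sketch below is illustrative only and reuses the hypothetical mcdi_read_le32() helper from the GET_CAPS example above.

/* Copy out the allocated counter IDs; returns how many were allocated. */
static int mae_store_allocated_counters(const uint8_t *resp, size_t resp_len,
					uint32_t *ids, size_t max_ids)
{
	uint32_t n = mcdi_read_le32(resp,
			MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_OFST);
	size_t i;

	if (n > max_ids || resp_len < MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(n))
		return -1;	/* malformed or unexpectedly large response */
	for (i = 0; i < n; i++)
		ids[i] = mcdi_read_le32(resp,
			MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST + 4 * i);
	return (int)n;
}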
+
+/***********************************/
+/* MC_CMD_MAE_COUNTER_FREE
+ * Free match-action-engine counters
+ */
+#define MC_CMD_MAE_COUNTER_FREE 0x144
+#undef MC_CMD_0x144_PRIVILEGE_CTG
+
+#define MC_CMD_0x144_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTER_FREE_IN msgrequest: Using this is equivalent to using V2
+ * with COUNTER_TYPE=AR.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMIN 8
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX 132
+#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX_MCDI2 132
+#define MC_CMD_MAE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* The number of counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_LEN 4
+/* An array containing the counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_COUNTER_FREE_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN 136
+/* The number of counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_LEN 4
+/* An array containing the counter IDs to be freed. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32
+/* Which type of counter to free. */
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_OFST 132
+#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_TYPE */
+
+/* MC_CMD_MAE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMIN 12
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX 136
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX_MCDI2 136
+#define MC_CMD_MAE_COUNTER_FREE_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_NUM(len) (((len)-8)/4)
+/* Generation count. A packet with generation count == GENERATION_COUNT will
+ * contain the final values for these counter IDs, unless the counter values
+ * are zero and zero squash is enabled. Note that the GENERATION_COUNT value is
+ * specific to the COUNTER_TYPE (IDENTIFIER field in packet header). Receiving
+ * a packet with generation count > GENERATION_COUNT guarantees that no more
+ * values will be written for these counters. If values for these counter IDs
+ * are present, the counter ID has been reallocated. A counter ID will not be
+ * reallocated within a single read cycle as this would merge increments from
+ * the 'old' and 'new' counters. GENERATION_COUNT_INVALID is reserved and
+ * unused.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_LEN 4
+/* The number of counter IDs actually freed. It is never less than 1; failure
+ * to free even a single counter will cause an error to be returned. It is
+ * never greater than the number that were requested to be freed, but may be
+ * less if counters could not be freed.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_OFST 4
+#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_LEN 4
+/* An array containing the IDs for the counters that were freed. Note that
+ * failure to free a counter can only occur on incorrect driver behaviour, so
+ * asserting that the expected counters were freed is reasonable. When
+ * debugging, attempting to free a single counter at a time will provide a
+ * reason for the failure to free said counter.
+ */
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_OFST 8
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MINNUM 1
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM 32
+#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_START
+ * Start streaming counter values, specifying an RxQ to deliver packets to.
+ * Counters allocated to the calling function will be written round-robin at a
+ * fixed cycle rate, assuming sufficient credits are available. The driver
+ * may cause the counter values to be written at a slower rate by constraining
+ * the availability of credits. Note that if the driver wishes to deliver
+ * packets to a different queue, it must call MAE_COUNTERS_STREAM_STOP to stop
+ * delivering packets to the current queue first.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START 0x151
+#undef MC_CMD_0x151_PRIVILEGE_CTG
+
+#define MC_CMD_0x151_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_IN msgrequest: Using V1 is equivalent to V2
+ * with COUNTER_TYPES_MASK=0x1 (i.e. AR counters only).
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_LEN 8
+/* The RxQ to write packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_LEN 2
+/* Maximum size in bytes of packets that may be written to the RxQ. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_OFST 2
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_LEN 2
+/* Optional flags. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_WIDTH 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_LBN 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_WIDTH 1
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN 12
+/* The RxQ to write packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_LEN 2
+/* Maximum size in bytes of packets that may be written to the RxQ. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST 2
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_LEN 2
+/* Optional flags. */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_WIDTH 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_OFST 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_LBN 1
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_WIDTH 1
+/* Mask of which counter types should be reported. Each bit position
+ * corresponds to a value of the MAE_COUNTER_TYPE enum. For example a value of
+ * 0x3 requests both AR and CT counters. A value of zero is invalid. Counter
+ * types not selected by the mask value won't be included in the stream. If a
+ * client wishes to change which counter types are reported, it must first call
+ * MAE_COUNTERS_STREAM_STOP, then restart it with the new mask value.
+ * Requesting a counter type which isn't supported by firmware (reported in
+ * MC_CMD_MAE_GET_CAPS/COUNTER_TYPES_SUPPORTED) will result in ENOTSUP.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST 8
+#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_START_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_LBN 0
+#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_WIDTH 1
+
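The V2 request is a fixed 12-byte layout: 16-bit QID and PACKET_SIZE, a FLAGS dword and the COUNTER_TYPES_MASK dword. A packing sketch follows, again assuming little-endian MCDI request buffers; mcdi_write_le16()/mcdi_write_le32() are hypothetical helpers mirroring mcdi_read_le32() above.

#include <string.h>

/* Write little-endian 16/32-bit fields into an MCDI request buffer. */
static void mcdi_write_le16(uint8_t *buf, size_t ofst, uint16_t v)
{
	buf[ofst] = v & 0xff;
	buf[ofst + 1] = v >> 8;
}

static void mcdi_write_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
	buf[ofst] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

/* Pack a STREAM_START_V2 request; FLAGS is left zero (defaults). */
static void mae_pack_stream_start_v2(uint8_t *req, uint16_t qid,
				     uint16_t packet_size, uint32_t types_mask)
{
	memset(req, 0, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN);
	mcdi_write_le16(req, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST, qid);
	mcdi_write_le16(req,
			MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST,
			packet_size);
	/* e.g. 0x3 selects both AR and CT counters, per the comment above */
	mcdi_write_le32(req,
			MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST,
			types_mask);
}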
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP
+ * Stop streaming counter values to the specified RxQ.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP 0x152
+#undef MC_CMD_0x152_PRIVILEGE_CTG
+
+#define MC_CMD_0x152_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN 2
+/* The RxQ to stop writing packets to. */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_LEN 2
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_LEN 4
+/* Generation count for AR counters. The final set of AR counter values will be
+ * written out in packets with count == GENERATION_COUNT. An empty packet with
+ * count > GENERATION_COUNT indicates that no more counter values of this type
+ * will be written to this stream. GENERATION_COUNT_INVALID is reserved and
+ * unused.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMIN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX 32
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX_MCDI2 32
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_NUM(len) (((len)-0)/4)
+/* Array of generation counts, indexed by MAE_COUNTER_TYPE. Note that since
+ * MAE_COUNTER_TYPE_AR==0, this response is backwards-compatible with V1. The
+ * final set of counter values will be written out in packets with count ==
+ * GENERATION_COUNT. An empty packet with count > GENERATION_COUNT indicates
+ * that no more counter values of this type will be written to this stream.
+ * GENERATION_COUNT_INVALID is reserved and unused.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_LEN 4
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MINNUM 1
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM 8
+#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM_MCDI2 8
+
+
+/***********************************/
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS
+ * Give a number of credits to the packetiser. Each credit received allows the
+ * MC to write one packet to the RxQ; therefore, for each credit the driver
+ * must have written sufficient descriptors for a packet of length
+ * MAE_COUNTERS_PACKETISER_STREAM_START/PACKET_SIZE and rung the doorbell.
+ */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS 0x153
+#undef MC_CMD_0x153_PRIVILEGE_CTG
+
+#define MC_CMD_0x153_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN msgrequest */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN 4
+/* Number of credits to give to the packetiser. */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_OFST 0
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_LEN 4
+
+/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT msgresponse */
+#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT_LEN 0
+
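When STREAM_START reported USES_CREDITS, the driver hands credits back only after it has reposted RX buffers large enough for PACKET_SIZE and rung the doorbell, one credit per buffer. A small packing sketch, reusing the hypothetical mcdi_write_le32() helper from the earlier example:

/* One credit per freshly reposted RX buffer; pack the 4-byte request. */
static void mae_pack_give_credits(uint8_t *req, uint32_t n_buffers_reposted)
{
	mcdi_write_le32(req,
			MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_OFST,
			n_buffers_reposted);
}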
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC
+ * Allocate an encapsulation header to be used in an Action Rule response. The
+ * header must be constructed as a valid packet with 0-length payload.
+ * Specifically, the L3/L4 lengths & checksums will only be incrementally fixed
+ * by the NIC, rather than recomputed entirely. Currently only IPv4, IPv6 and
+ * UDP are supported. If the maximum number of headers have already been
+ * allocated then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC 0x148
+#undef MC_CMD_0x148_PRIVILEGE_CTG
+
+#define MC_CMD_0x148_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX 252
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(num) (4+1*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_NUM(len) (((len)-4)/1)
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_LEN 1
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MINNUM 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM 248
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_LEN 4
+/* enum: An encap metadata ID that is guaranteed never to represent real encap
+ * metadata
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL 0xffffffff
+
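As the comment above notes, HDR_DATA must carry a fully formed encapsulation header with a zero-length payload; the NIC only fixes the L3/L4 lengths and checksums incrementally. A sketch of allocating one, under the same MCDI-helper assumptions as the GIVE_CREDITS sketch (hypothetical helper name and parameters):

static int mae_encap_header_alloc(struct efx_nic *efx, u32 encap_type,
				  const u8 *hdr, size_t hdr_len, u32 *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(
				MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	if (hdr_len > MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM)
		return -EINVAL;
	MCDI_SET_DWORD(inbuf, MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE, encap_type);
	memcpy(MCDI_PTR(inbuf, MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA), hdr, hdr_len);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ENCAP_HEADER_ALLOC,
			  inbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(hdr_len),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN)
		return -EIO;
	*id_out = MCDI_DWORD(outbuf, MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID);
	return 0;
}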
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE
+ * Update encap action metadata. See comments for MAE_ENCAP_HEADER_ALLOC.
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE 0x149
+#undef MC_CMD_0x149_PRIVILEGE_CTG
+
+#define MC_CMD_0x149_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMIN 8
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX 252
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_OFST 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_OFST 8
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_LEN 1
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MINNUM 0
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM 244
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAE_ENCAP_HEADER_FREE
+ * Free encap action metadata
+ */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE 0x14a
+#undef MC_CMD_0x14a_PRIVILEGE_CTG
+
+#define MC_CMD_0x14a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ENCAP_HEADER_FREE_IN msgrequest */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MINNUM 1
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM 32
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ENCAP_HEADER_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_OFST 0
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_LEN 4
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MINNUM 1
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM 32
+#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MAC_ADDR_ALLOC
+ * Allocate a MAC address. Hardware implementations have MAC addresses
+ * programmed into an indirection table, and clients should take care not to
+ * allocate the same MAC address twice (but instead reuse its ID). If the
+ * maximum number of MAC addresses have already been allocated then the
+ * command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC 0x15e
+#undef MC_CMD_0x15e_PRIVILEGE_CTG
+
+#define MC_CMD_0x15e_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MAC_ADDR_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN 6
+/* MAC address as bytes in network order. */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN 6
+
+/* MC_CMD_MAE_MAC_ADDR_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_LEN 4
+/* enum: A MAC address ID that is guaranteed never to represent a real MAC
+ * address.
+ */
+#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL 0xffffffff
+
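Because the indirection table entries are a shared resource and duplicate allocations waste them, a driver would typically keep its own address-to-ID map and reference-count entries, only issuing MC_CMD_MAE_MAC_ADDR_ALLOC for addresses it has not already programmed. A hypothetical shape for such an entry (all names illustrative):

#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/refcount.h>

struct mae_mac_entry {
	u8 addr[ETH_ALEN];	/* address as programmed, network byte order */
	u32 mac_id;		/* MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID value */
	refcount_t ref;		/* MC_CMD_MAE_MAC_ADDR_FREE when this hits zero */
};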
+
+/***********************************/
+/* MC_CMD_MAE_MAC_ADDR_FREE
+ * Free MAC address.
+ */
+#define MC_CMD_MAE_MAC_ADDR_FREE 0x15f
+#undef MC_CMD_0x15f_PRIVILEGE_CTG
+
+#define MC_CMD_0x15f_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MAC_ADDR_FREE_IN msgrequest */
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MINNUM 1
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM 32
+#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_MAC_ADDR_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_OFST 0
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_LEN 4
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MINNUM 1
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM 32
+#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_ALLOC
+ * Allocate an action set, which can be referenced either in response to an
+ * Action Rule, or as part of an Action Set List. If the maximum number of
+ * action sets have already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC 0x14d
+#undef MC_CMD_0x14d_PRIVILEGE_CTG
+
+#define MC_CMD_0x14d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * COUNTER_LIST_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* If a driver only wishes to update one counter within this action set, it
+ * can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
+ * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_LEN 4
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V2_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V7_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LEN 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * COUNTER_LIST_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* If a driver only wishes to update one counter within this action set, it
+ * can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
+ * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+
+/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
+/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
+ * ACTION_SET_ID_NULL. This allows an AS_ID to be distinguished from an ASL_ID
+ * returned from MC_CMD_MAE_ACTION_SET_LIST_ALLOC.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_LEN 4
+/* enum: An action set ID that is guaranteed never to represent an action set
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL 0xffffffff
+
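Every optional action in this request is disabled by writing the corresponding all-ones *_NULL ID, as the field comments above describe. A sketch of allocating a plain "deliver only" action set, under the same MCDI-helper assumptions as the earlier sketches (helper name and parameters hypothetical; MCDI_DECLARE_BUF zero-fills the buffer, so the VLAN, RSVD and MARK_VALUE fields can be left at zero here):

static int mae_alloc_deliver_action_set(struct efx_nic *efx,
					u32 deliver_mport_selector, u32 *as_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS, 0);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
		       MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
	/* No counter action: both fields take their all-ones NULL IDs. */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID, 0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, 0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
		       MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
		       MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
		       deliver_mport_selector);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*as_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
	return 0;
}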
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_FREE
+ */
+#define MC_CMD_MAE_ACTION_SET_FREE 0x14e
+#undef MC_CMD_0x14e_PRIVILEGE_CTG
+
+#define MC_CMD_0x14e_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_FREE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_SET_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC
+ * Allocate an action set list (ASL) that can be referenced by an ID. The ASL
+ * ID can be used when inserting an action rule, so that for each packet
+ * matching the rule every action set in the list is applied. If the maximum
+ * number of ASLs have already been allocated then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC 0x14f
+#undef MC_CMD_0x14f_PRIVILEGE_CTG
+
+#define MC_CMD_0x14f_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMIN 8
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(num) (4+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_NUM(len) (((len)-4)/4)
+/* Number of elements in the AS_IDS field. */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_LEN 4
+/* The IDs of the action sets in this list. The last element of this list may
+ * be the ID of an already allocated ASL. In this case the action sets from the
+ * already allocated ASL will be applied after the action sets supplied by this
+ * request. This mechanism can be used to reduce resource usage in the case
+ * where one ASL is a sublist of another ASL. The sublist should be allocated
+ * first, then the superlist should be allocated by supplying all required
+ * action set IDs that are not in the sublist followed by the ID of the
+ * sublist. One sublist can be referenced by multiple superlists.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_OFST 4
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM 62
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2 254
+
+/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN 4
+/* The MSB of the ASL_ID is guaranteed to be set. This allows an ASL_ID to be
+ * distinguished from an AS_ID returned from MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN 4
+/* enum: An action set list ID that is guaranteed never to represent an action
+ * set list
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL 0xffffffff
+
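The sublist mechanism described above lets one ASL reuse another as its tail. A sketch of building a three-entry superlist whose last element is an already allocated sublist, under the same MCDI-helper assumptions as the earlier sketches (the AS_IDS array is written directly as little-endian dwords; helper name hypothetical):

static int mae_alloc_superlist(struct efx_nic *efx, u32 as_a, u32 as_b,
			       u32 sublist_asl_id, u32 *asl_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(3));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
	__le32 *ids;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, 3);
	ids = (__le32 *)MCDI_PTR(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS);
	ids[0] = cpu_to_le32(as_a);
	ids[1] = cpu_to_le32(as_b);
	ids[2] = cpu_to_le32(sublist_asl_id);	/* sublist ID must come last */
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*asl_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
	return 0;
}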
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE
+ * Free match-action-engine redirect_lists
+ */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE 0x150
+#undef MC_CMD_0x150_PRIVILEGE_CTG
+
+#define MC_CMD_0x150_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_OFST 0
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_LEN 4
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_INSERT
+ * Inserts an Outer Rule, which controls encapsulation parsing, and may
+ * influence the Lookup Sequence. If the maximum number of rules have already
+ * been inserted then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT 0x15a
+#undef MC_CMD_0x15a_PRIVILEGE_CTG
+
+#define MC_CMD_0x15a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMIN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(num) (16+1*(num))
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_NUM(len) (((len)-16)/1)
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* Match priority. Lower values have higher priority. Must be less than
+ * MC_CMD_MAE_GET_CAPS_OUT.ENCAP_PRIOS. If a packet matches two filters with
+ * equal priority then it is unspecified which takes priority.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_LEN 4
+/* Deprecated alias for ACTION_CONTROL. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_WIDTH 16
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_LEN 4
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_LEN 4
+/* Structure of the format MAE_ENC_FIELD_PAIRS. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_OFST 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM 236
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_LEN 4
+/* enum: An outer match ID that is guaranteed never to represent an outer match
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_REMOVE
+ */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE 0x15b
+#undef MC_CMD_0x15b_PRIVILEGE_CTG
+
+#define MC_CMD_0x15b_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_REMOVE_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMIN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MINNUM 1
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM 32
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_OUTER_RULE_REMOVE_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMIN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MINNUM 1
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
+#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_OUTER_RULE_UPDATE
+ * Atomically change the response of an Outer Rule.
+ */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
+#undef MC_CMD_0x17d_PRIVILEGE_CTG
+
+#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
+/* ID of outer rule to update */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
+
+/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
+
+/* MAE_ACTION_RULE_RESPONSE structuredef */
+#define MAE_ACTION_RULE_RESPONSE_LEN 16
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LBN 0
+#define MAE_ACTION_RULE_RESPONSE_ASL_ID_WIDTH 32
+/* Only one of ASL_ID or AS_ID may have a non-NULL value. */
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_OFST 4
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_LBN 32
+#define MAE_ACTION_RULE_RESPONSE_AS_ID_WIDTH 32
+/* Controls lookup flow when this rule is hit. See sub-fields for details. More
+ * info on the lookup sequence can be found in SF-122976-TC. It is an error to
+ * set both DO_CT and DO_RECIRC.
+ */
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_LBN 0
+#define MAE_ACTION_RULE_RESPONSE_DO_CT_WIDTH 1
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_LBN 1
+#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_WIDTH 1
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_LBN 2
+#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_LBN 8
+#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_WIDTH 8
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_OFST 8
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_LBN 16
+#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_WIDTH 16
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LBN 64
+#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_WIDTH 32
+/* Counter ID to increment if DO_CT or DO_RECIRC is set. Must be set to
+ * COUNTER_ID_NULL otherwise. Counter ID must have been allocated with
+ * COUNTER_TYPE=AR.
+ */
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_OFST 12
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LBN 96
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_INSERT
+ * Insert a rule specifying that packets matching a filter be processed
+ * according to a previously allocated action. Masks can be set as indicated by
+ * MC_CMD_MAE_GET_MATCH_FIELD_CAPABILITIES. If the maximum number of rules have
+ * already been inserted then the command will fail with MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT 0x15c
+#undef MC_CMD_0x15c_PRIVILEGE_CTG
+
+#define MC_CMD_0x15c_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMIN 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(num) (28+1*(num))
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_NUM(len) (((len)-28)/1)
+/* See MC_CMD_MAE_OUTER_RULE_INSERT_IN/PRIO. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_LEN 20
+/* Reserved for future use. Must be set to zero. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_OFST 24
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_LEN 4
+/* Structure of the format MAE_FIELD_MASK_VALUE_PAIRS */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_OFST 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM 224
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM_MCDI2 992
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_LEN 4
+/* enum: An action rule ID that is guaranteed never to represent an action rule
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL 0xffffffff
+
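The 20-byte RESPONSE field embeds the MAE_ACTION_RULE_RESPONSE structure defined above. A sketch of filling it for the simple case of one action set and no further lookup, using the structure's _OFST values and little-endian stores (helper name hypothetical; only one of ASL_ID/AS_ID may be non-NULL, and with LOOKUP_CONTROL clear the counter ID must also be the NULL value):

#include <asm/unaligned.h>	/* put_unaligned_le32 */

/* 'resp' would typically be MCDI_PTR(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE). */
static void mae_rule_response_single_as(u8 *resp, u32 as_id)
{
	put_unaligned_le32(MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL,
			   resp + MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST);
	put_unaligned_le32(as_id, resp + MAE_ACTION_RULE_RESPONSE_AS_ID_OFST);
	/* No DO_CT/DO_RECIRC, so LOOKUP_CONTROL is left clear ... */
	put_unaligned_le32(0, resp + MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_OFST);
	/* ... and COUNTER_ID must therefore be the all-ones NULL value. */
	put_unaligned_le32(0xffffffff, resp + MAE_ACTION_RULE_RESPONSE_COUNTER_ID_OFST);
}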
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_UPDATE
+ * Atomically change the response of an action rule. Firmware may return
+ * ENOTSUP, in which case the driver should DELETE/INSERT.
+ */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE 0x15d
+#undef MC_CMD_0x15d_PRIVILEGE_CTG
+
+#define MC_CMD_0x15d_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_UPDATE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_LEN 24
+/* ID of action rule to update */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_LEN 20
+
+/* MC_CMD_MAE_ACTION_RULE_UPDATE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_DELETE
+ */
+#define MC_CMD_MAE_ACTION_RULE_DELETE 0x155
+#undef MC_CMD_0x155_PRIVILEGE_CTG
+
+#define MC_CMD_0x155_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_DELETE_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMIN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM_MCDI2 32
+
+/* MC_CMD_MAE_ACTION_RULE_DELETE_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMIN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX_MCDI2 128
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_NUM(len) (((len)-0)/4)
+/* Same semantics as MC_CMD_MAE_COUNTER_FREE */
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MINNUM 1
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM 32
+#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_LOOKUP
+ * Return the m-port corresponding to a selector.
+ */
+#define MC_CMD_MAE_MPORT_LOOKUP 0x160
+#undef MC_CMD_0x160_PRIVILEGE_CTG
+
+#define MC_CMD_0x160_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_LOOKUP_IN msgrequest */
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_LEN 4
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_LEN 4
+
+/* MC_CMD_MAE_MPORT_LOOKUP_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN 4
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ALLOC
+ * Allocates an m-port, which can subsequently be used in action rules as a
+ * match or delivery argument.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC 0x163
+#undef MC_CMD_0x163_PRIVILEGE_CTG
+
+#define MC_CMD_0x163_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_ALLOC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN 24
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_LEN 16
+/* An m-port selector identifying the VNIC to which traffic should be
+ * delivered. This must currently be set to MAE_MPORT_SELECTOR_ASSIGNED (i.e.
+ * the m-port assigned to the calling client).
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN 24
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_LEN 4
+/* A value that will appear in the packet metadata for any packets delivered
+ * using an alias type m-port. This value is guaranteed unique on the VNIC
+ * being delivered to, and is guaranteed not to exceed the range of values
+ * representable in the relevant metadata field.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_FREE
+ * Free an m-port which was previously allocated by the driver.
+ */
+#define MC_CMD_MAE_MPORT_FREE 0x164
+#undef MC_CMD_0x164_PRIVILEGE_CTG
+
+#define MC_CMD_0x164_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_FREE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_FREE_IN_LEN 4
+/* MPORT_ID as returned by MC_CMD_MAE_MPORT_ALLOC. */
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_FREE_OUT_LEN 0
+
+/* MAE_MPORT_DESC structuredef */
+#define MAE_MPORT_DESC_LEN 52
+#define MAE_MPORT_DESC_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_FLAGS_OFST 4
+#define MAE_MPORT_DESC_FLAGS_LEN 4
+#define MAE_MPORT_DESC_FLAGS_LBN 32
+#define MAE_MPORT_DESC_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification. */
+#define MAE_MPORT_DESC_UUID_OFST 16
+#define MAE_MPORT_DESC_UUID_LEN 16
+#define MAE_MPORT_DESC_UUID_LBN 128
+#define MAE_MPORT_DESC_UUID_WIDTH 128
+/* Big wadge of space reserved for other common properties */
+#define MAE_MPORT_DESC_RESERVED_OFST 32
+#define MAE_MPORT_DESC_RESERVED_LEN 8
+#define MAE_MPORT_DESC_RESERVED_LO_OFST 32
+#define MAE_MPORT_DESC_RESERVED_LO_LEN 4
+#define MAE_MPORT_DESC_RESERVED_LO_LBN 256
+#define MAE_MPORT_DESC_RESERVED_LO_WIDTH 32
+#define MAE_MPORT_DESC_RESERVED_HI_OFST 36
+#define MAE_MPORT_DESC_RESERVED_HI_LEN 4
+#define MAE_MPORT_DESC_RESERVED_HI_LBN 288
+#define MAE_MPORT_DESC_RESERVED_HI_WIDTH 32
+#define MAE_MPORT_DESC_RESERVED_LBN 256
+#define MAE_MPORT_DESC_RESERVED_WIDTH 64
+/* Logical port index. Only valid when MPORT_TYPE is NET_PORT. */
+#define MAE_MPORT_DESC_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
+
+/* MAE_MPORT_DESC_V2 structuredef */
+#define MAE_MPORT_DESC_V2_LEN 56
+#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
+#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
+#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
+#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification. */
+#define MAE_MPORT_DESC_V2_UUID_OFST 16
+#define MAE_MPORT_DESC_V2_UUID_LEN 16
+#define MAE_MPORT_DESC_V2_UUID_LBN 128
+#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
+/* Big wadge of space reserved for other common properties */
+#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
+#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
+#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
+#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
+#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
+#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
+#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
+#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
+#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
+#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
+#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
+#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
+/* Logical port index. Only valid when MPORT_TYPE is NET_PORT. */
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
+/* A client handle for the VNIC's owner. Only valid for type VNIC. */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ENUMERATE
+ * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
+ * will be removed at some future point.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
+#undef MC_CMD_0x17c_PRIVILEGE_CTG
+
+#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_READ_JOURNAL
+ * Firmware maintains a per-client journal of mport creations and deletions.
+ * This journal is clear-on-read, i.e. repeated calls of this command will
+ * drain the buffer. Whenever the caller resets its function via FLR or
+ * MC_CMD_ENTITY_RESET, the journal is regenerated from a blank start.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL 0x147
+#undef MC_CMD_0x147_PRIVILEGE_CTG
+
+#define MC_CMD_0x147_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_IN msgrequest */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN 4
+/* Any unused flags are reserved and must be set to zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_LEN 4
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMIN 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_WIDTH 1
+/* The number of MAE_MPORT_DESC structures in MPORT_DESC_DATA. May be zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_OFST 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_OFST 8
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM 240
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1008
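
A minimal sketch of how a driver might consume this clear-on-read journal, assuming the existing sfc MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, MCDI_PTR, efx_mcdi_rpc); it is not part of this patch, and the pci_dbg() stands in for a real MAE_MPORT_DESC parser. It re-issues the command until MORE is clear, walking the returned data at the reported SIZEOF_MPORT_DESC stride (the same stride rule applies to the deprecated MC_CMD_MAE_MPORT_ENUMERATE output above).

/* Editorial sketch, not part of this patch: drain the per-client mport
 * journal as described above.
 */
static int efx_example_read_mport_journal(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX);
	unsigned int i, count, stride;
	const u8 *data;
	size_t outlen;
	bool more;
	int rc;

	do {
		/* All request flags are currently reserved and must be zero. */
		MCDI_SET_DWORD(inbuf, MAE_MPORT_READ_JOURNAL_IN_FLAGS, 0);
		rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_READ_JOURNAL,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMIN)
			return -EIO;
		more = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_FLAGS) &
		       BIT(MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN);
		count = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT);
		stride = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC);
		data = MCDI_PTR(outbuf, MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA);
		for (i = 0; i < count; i++) {
			/* A real consumer would parse the MAE_MPORT_DESC
			 * fields here; stepping by the reported stride keeps
			 * this correct if the descriptor grows in future.
			 */
			pci_dbg(efx->pci_dev, "mport journal entry %u at %p\n",
				i, data);
			data += stride;
		}
	} while (more);
	return 0;
}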
+
+/* TABLE_FIELD_DESCR structuredef: An individual table field descriptor. This
+ * describes the location and properties of one N-bit field within a wider
+ * M-bit key/mask/response value.
+ */
+#define TABLE_FIELD_DESCR_LEN 8
+/* Identifier for this field. */
+#define TABLE_FIELD_DESCR_FIELD_ID_OFST 0
+#define TABLE_FIELD_DESCR_FIELD_ID_LEN 2
+/* Enum values, see field(s): */
+/* TABLE_FIELD_ID */
+#define TABLE_FIELD_DESCR_FIELD_ID_LBN 0
+#define TABLE_FIELD_DESCR_FIELD_ID_WIDTH 16
+/* Lowest (least significant) bit number of the bits of this field. */
+#define TABLE_FIELD_DESCR_LBN_OFST 2
+#define TABLE_FIELD_DESCR_LBN_LEN 2
+#define TABLE_FIELD_DESCR_LBN_LBN 16
+#define TABLE_FIELD_DESCR_LBN_WIDTH 16
+/* Width of this field in bits. */
+#define TABLE_FIELD_DESCR_WIDTH_OFST 4
+#define TABLE_FIELD_DESCR_WIDTH_LEN 2
+#define TABLE_FIELD_DESCR_WIDTH_LBN 32
+#define TABLE_FIELD_DESCR_WIDTH_WIDTH 16
+/* The mask type for this field. (Note that masking is relevant to keys; fields
+ * of responses are always reported with the EXACT type.)
+ */
+#define TABLE_FIELD_DESCR_MASK_TYPE_OFST 6
+#define TABLE_FIELD_DESCR_MASK_TYPE_LEN 1
+/* enum: Field must never be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_NEVER 0x0
+/* enum: Exact match: field must always be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_EXACT 0x1
+/* enum: Ternary match: arbitrary mask bits are allowed. */
+#define TABLE_FIELD_DESCR_MASK_TERNARY 0x2
+/* enum: Whole field match: mask must be all 1 bits, or all 0 bits. */
+#define TABLE_FIELD_DESCR_MASK_WHOLE_FIELD 0x3
+/* enum: Longest prefix match: mask must be 1 bit(s) followed by 0 bit(s). */
+#define TABLE_FIELD_DESCR_MASK_LPM 0x4
+#define TABLE_FIELD_DESCR_MASK_TYPE_LBN 48
+#define TABLE_FIELD_DESCR_MASK_TYPE_WIDTH 8
+/* A version code that allows field semantics to be extended. All fields
+ * currently use version 0.
+ */
+#define TABLE_FIELD_DESCR_SCHEME_OFST 7
+#define TABLE_FIELD_DESCR_SCHEME_LEN 1
+#define TABLE_FIELD_DESCR_SCHEME_LBN 56
+#define TABLE_FIELD_DESCR_SCHEME_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_TABLE_LIST
+ * Return the list of tables which may be accessed via this table API.
+ */
+#define MC_CMD_TABLE_LIST 0x1c9
+#undef MC_CMD_0x1c9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_LIST_IN msgrequest */
+#define MC_CMD_TABLE_LIST_IN_LEN 4
+/* Index of the first item to be returned in the TABLE_ID sequence. (Set to 0
+ * for the first call; further calls are only required if the whole sequence
+ * does not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_OFST 0
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_LEN 4
+
+/* MC_CMD_TABLE_LIST_OUT msgresponse */
+#define MC_CMD_TABLE_LIST_OUT_LENMIN 4
+#define MC_CMD_TABLE_LIST_OUT_LENMAX 252
+#define MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_LIST_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(len) (((len)-4)/4)
+/* The total number of tables. */
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_OFST 0
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_LEN 4
+/* A sequence of table identifiers. If all N_TABLES items do not fit, further
+ * items can be obtained by repeating the call with a non-zero
+ * FIRST_TABLE_ID_INDEX.
+ */
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_OFST 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_LEN 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MINNUM 0
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM 62
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM_MCDI2 254
+/* Enum values, see field(s): */
+/* TABLE_ID */
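
A minimal sketch of the FIRST_TABLE_ID_INDEX paging described above, again assuming the sfc MCDI helpers and not part of this patch; the pci_dbg() stands in for whatever the driver would do with each table ID. MC_CMD_TABLE_DESCRIPTOR's FIRST_FIELDS_INDEX below follows the same pattern for its FIELDS array.

/* Editorial sketch, not part of this patch: page through the table list. */
static int efx_example_list_tables(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2);
	unsigned int index = 0, n_tables, n_this, i;
	const __le32 *ids;
	size_t outlen;
	int rc;

	do {
		MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, index);
		rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_TABLE_LIST_OUT_LENMIN)
			return -EIO;
		n_tables = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES);
		n_this = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(outlen);
		if (!n_this)
			break;	/* avoid spinning if the MC returns nothing */
		ids = (const __le32 *)MCDI_PTR(outbuf, TABLE_LIST_OUT_TABLE_ID);
		for (i = 0; i < n_this; i++)
			pci_dbg(efx->pci_dev, "table id %#x\n",
				le32_to_cpu(ids[i]));
		index += n_this;
	} while (index < n_tables);
	return 0;
}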
+
+
+/***********************************/
+/* MC_CMD_TABLE_DESCRIPTOR
+ * Request the table descriptor for a particular table. This describes
+ * properties of the table and the format of the key and response. May return
+ * EINVAL for unknown table ID.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR 0x1ca
+#undef MC_CMD_0x1ca_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ca_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_LEN 8
+/* Identifier of the table to describe. */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Index of the first item to be returned in the FIELDS sequence. (Set to 0 for
+ * the first call; further calls are only required if the whole sequence does
+ * not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_LEN 4
+
+/* MC_CMD_TABLE_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_NUM(len) (((len)-20)/8)
+/* Maximum number of entries in this table. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_LEN 4
+/* The type of table. (This is really just informational; the important
+ * properties of a table that affect programming can be deduced from other
+ * items in the table or field descriptor.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_LEN 2
+/* enum: Direct table (essentially just an array). Behaves like a BCAM for
+ * programming purposes, where the fact that the key is actually used as an
+ * array index is really just an implementation detail.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_DIRECT 0x1
+/* enum: BCAM (binary CAM) table: exact match on all key fields. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_BCAM 0x2
+/* enum: TCAM (ternary CAM) table: matches fields with a mask. Each entry may
+ * have its own different mask.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_TCAM 0x3
+/* enum: STCAM (semi-TCAM) table: like a TCAM but entries share a limited
+ * number of unique masks.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_STCAM 0x4
+/* Width of key (and corresponding mask, for TCAM or STCAM) in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_OFST 6
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_LEN 2
+/* Width of response in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_LEN 2
+/* The total number of fields in the key. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_OFST 10
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_LEN 2
+/* The total number of fields in the response. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_OFST 12
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_LEN 2
+/* Number of priorities for STCAM or TCAM; otherwise 0. The priority of a table
+ * entry (relevant when more than one masked entry matches) ranges from
+ * 0=highest to N_PRIORITIES-1=lowest.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_OFST 14
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_LEN 2
+/* Maximum number of masks for STCAM; otherwise 0. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_OFST 16
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_LEN 2
+/* Flags. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_OFST 18
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_LEN 1
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_OFST 18
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_LBN 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_WIDTH 1
+/* Access scheme version code, allowing the method of accessing table entries
+ * to change semantics in future. A client which does not understand the value
+ * of this field should assume that it cannot program this table. Currently
+ * always set to 0 indicating the original MC_CMD_TABLE_INSERT/UPDATE/DELETE
+ * semantics.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_OFST 19
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_LEN 1
+/* A sequence of TABLE_FIELD_DESCR structures: N_KEY_FIELDS items describing
+ * the key, followed by N_RESP_FIELDS items describing the response. If all
+ * N_KEY_FIELDS+N_RESP_FIELDS items do not fit, further items can be obtained
+ * by repeating the call with a non-zero FIRST_FIELDS_INDEX.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_OFST 20
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LEN 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_OFST 20
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LEN 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LBN 160
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_WIDTH 32
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_OFST 24
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LEN 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LBN 192
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_WIDTH 32
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MINNUM 1
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM 29
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM_MCDI2 125
+
+
+/***********************************/
+/* MC_CMD_TABLE_INSERT
+ * Insert a new entry into a table. The entry must not currently exist. May
+ * return EINVAL for unknown table ID or other bad request parameters, EEXIST
+ * if the entry already exists, ENOSPC if there is no space or EPERM if the
+ * operation is not permitted. In case of an error, the additional MCDI error
+ * argument field returns the raw error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_INSERT 0x1cd
+#undef MC_CMD_0x1cd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_INSERT_IN msgrequest */
+#define MC_CMD_TABLE_INSERT_IN_LENMIN 16
+#define MC_CMD_TABLE_INSERT_IN_LENMAX 252
+#define MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_INSERT_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_INSERT_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_INSERT_IN_DATA_OFST 12
+#define MC_CMD_TABLE_INSERT_IN_DATA_LEN 4
+#define MC_CMD_TABLE_INSERT_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM_MCDI2 252
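
A minimal sketch of the DATA packing rule described above: each field is placed at its LBN with its WIDTH inside the wide key/mask/response value, which is then emitted as little-endian 32-bit words, bits [31:0] first. This helper is illustrative only (not part of this patch); a real driver would take the lbn/width pairs from the TABLE_FIELD_DESCR entries returned by MC_CMD_TABLE_DESCRIPTOR, and would pack the key, the mask (if MASK_WIDTH > 0) and the response each into their own word-aligned run.

/* Editorial sketch, not part of this patch: set a 'width'-bit field whose
 * least significant bit sits at overall bit position 'lbn' within a wide
 * value stored as 32-bit words packed bits [31:0] first.
 */
static void example_set_table_field(__le32 *words, unsigned int lbn,
				    unsigned int width, u64 value)
{
	unsigned int bit;

	for (bit = 0; bit < width; bit++) {
		unsigned int pos = lbn + bit;

		if (value & BIT_ULL(bit))
			words[pos / 32] |= cpu_to_le32(BIT(pos % 32));
	}
}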
+
+/* MC_CMD_TABLE_INSERT_OUT msgresponse */
+#define MC_CMD_TABLE_INSERT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TABLE_UPDATE
+ * Update an existing entry in a table with a new response value. May return
+ * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
+ * entry does not already exist, or EPERM if the operation is not permitted. In
+ * case of an error, the additional MCDI error argument field returns the raw
+ * error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_UPDATE 0x1ce
+#undef MC_CMD_0x1ce_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_UPDATE_IN msgrequest */
+#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
+#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TABLE_DELETE
+ * Delete an existing entry in a table. May return EINVAL for unknown table ID
+ * or other bad request parameters, ENOENT if the entry does not exist, or
+ * EPERM if the operation is not permitted. In case of an error, the additional
+ * MCDI error argument field returns the raw error code from the underlying CAM
+ * driver.
+ */
+#define MC_CMD_TABLE_DELETE 0x1cf
+#undef MC_CMD_0x1cf_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DELETE_IN msgrequest */
+#define MC_CMD_TABLE_DELETE_IN_LENMIN 16
+#define MC_CMD_TABLE_DELETE_IN_LENMAX 252
+#define MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DELETE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_DELETE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_DELETE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_DELETE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_DELETE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_DELETE_OUT msgresponse */
+#define MC_CMD_TABLE_DELETE_OUT_LEN 0
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
new file mode 100644
index 000000000000..ff6d80c8e486
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2022 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef MCDI_PCOL_MAE_H
+#define MCDI_PCOL_MAE_H
+/* MCDI definitions for Match-Action Engine functionality that are
+ * missing from the main mcdi_pcol.h
+ */
+
+/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
+ * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
+
+#endif /* MCDI_PCOL_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 94c6a345c0b1..ad4694fa3dda 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -20,7 +20,7 @@
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
@@ -46,7 +46,7 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
static int efx_mcdi_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 723bbeea5d0c..7ef823d7a89a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -178,6 +178,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
+#define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -477,6 +478,8 @@ enum efx_sync_events_state {
* @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
+ * not recognised
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -539,6 +542,7 @@ struct efx_channel {
unsigned int n_rx_xdp_bad_drops;
unsigned int n_rx_xdp_tx;
unsigned int n_rx_xdp_redirect;
+ unsigned int n_rx_mport_bad;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -622,12 +626,55 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state {
- STATE_UNINIT = 0, /* device being probed/removed or is frozen */
- STATE_READY = 1, /* hardware ready and netdev registered */
- STATE_DISABLED = 2, /* device disabled due to hardware errors */
- STATE_RECOVERY = 3, /* device recovering from PCI error */
+ STATE_UNINIT = 0, /* device being probed/removed */
+ STATE_PROBED, /* hardware probed */
+ STATE_NET_DOWN, /* netdev registered */
+ STATE_NET_UP, /* ready for traffic */
+ STATE_DISABLED, /* device disabled due to hardware errors */
+
+ STATE_RECOVERY = 0x100,/* recovering from PCI error */
+ STATE_FROZEN = 0x200, /* frozen by power management */
};
+static inline bool efx_net_active(enum nic_state state)
+{
+ return state == STATE_NET_DOWN || state == STATE_NET_UP;
+}
+
+static inline bool efx_frozen(enum nic_state state)
+{
+ return state & STATE_FROZEN;
+}
+
+static inline bool efx_recovering(enum nic_state state)
+{
+ return state & STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_freeze(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_FROZEN;
+}
+
+static inline enum nic_state efx_thaw(enum nic_state state)
+{
+ WARN_ON(!efx_frozen(state));
+ return state & ~STATE_FROZEN;
+}
+
+static inline enum nic_state efx_recover(enum nic_state state)
+{
+ WARN_ON(!efx_net_active(state));
+ return state | STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_recovered(enum nic_state state)
+{
+ WARN_ON(!efx_recovering(state));
+ return state & ~STATE_RECOVERY;
+}
+
/* Forward declaration */
struct efx_nic;
@@ -923,12 +970,15 @@ enum efx_xdp_tx_queues_mode {
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
+ * @vf_reps_lock: Protects vf_reps list
+ * @vf_reps: local VF reps
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
+ * @tc: state for TC offload (EF100).
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
@@ -1102,6 +1152,8 @@ struct efx_nic {
unsigned vf_init_count;
unsigned vi_scale;
#endif
+ spinlock_t vf_reps_lock;
+ struct list_head vf_reps;
struct efx_ptp_data *ptp_data;
bool ptp_warned;
@@ -1110,6 +1162,7 @@ struct efx_nic {
bool xdp_rxq_info_failed;
struct notifier_block netdev_notifier;
+ struct efx_tc_state *tc;
unsigned int mem_bar;
u32 reg_base;
@@ -1123,6 +1176,24 @@ struct efx_nic {
atomic_t n_rx_noskb_drops;
};
+/**
+ * struct efx_probe_data - State after hardware probe
+ * @pci_dev: The PCI device
+ * @efx: Efx NIC details
+ */
+struct efx_probe_data {
+ struct pci_dev *pci_dev;
+ struct efx_nic efx;
+};
+
+static inline struct efx_nic *efx_netdev_priv(struct net_device *dev)
+{
+ struct efx_probe_data **probe_ptr = netdev_priv(dev);
+ struct efx_probe_data *probe_data = *probe_ptr;
+
+ return &probe_data->efx;
+}
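
For context only (this is not part of the patch hunk above): efx_netdev_priv() implies that the net_device's private area now holds just a pointer back to the enclosing efx_probe_data, rather than embedding the efx_nic itself. A minimal sketch of the matching allocation side, with illustrative names and assuming alloc_etherdev_mq()/netdev_priv(), might look like this; the driver's real allocation lives elsewhere in this series.

/* Editorial sketch, not part of this patch: pairing for efx_netdev_priv(). */
static struct net_device *efx_example_alloc_netdev(struct efx_probe_data *probe_data)
{
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;

	/* The netdev private area only stores a pointer to probe_data. */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return NULL;
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	return net_dev;
}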
+
static inline int efx_dev_registered(struct efx_nic *efx)
{
return efx->net_dev->reg_state == NETREG_REGISTERED;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fa8b9aacca11..4826e6a7e4ce 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
int rc;
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
goto out_unlock;
@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
}
#endif
out_unlock:
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
channel->rps_flow_id = NULL;
}
#endif
- down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
}
#ifdef CONFIG_RFS_ACCEL
@@ -857,7 +853,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
- struct efx_nic *efx = netdev_priv(req->net_dev);
+ struct efx_nic *efx = efx_netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
int slot_idx = req - efx->rps_slot;
struct efx_arfs_rule *rule;
@@ -942,7 +938,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
struct efx_arfs_rule *rule;
struct flow_keys fk;
diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index cce23803c652..89ccd65c978b 100644
--- a/drivers/net/ethernet/sfc/siena/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -2778,7 +2778,7 @@ void efx_farch_filter_table_remove(struct efx_nic *efx)
enum efx_farch_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
+ bitmap_free(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
@@ -2822,9 +2822,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
table = &state->table[table_id];
if (table->size == 0)
continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
+ table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(array_size(sizeof(*table->spec),
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
index 3df0f0eca3b7..3f7899daa86a 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -1264,7 +1264,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
}
/* The MC is going down in to BIST mode. set the BIST flag to block
- * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
* (which doesn't actually execute a reset, it waits for the controlling
* function to reset it).
*/
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
index 89a7fd47b057..a3cc8b7ec732 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -274,7 +274,7 @@
* MC_CMD_WORKAROUND_BUG26807.
* May also returned for other operations such as sub-variant switching. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* The clock whose frequency you've attempted to set set
+/* The clock whose frequency you've attempted to set
* doesn't exist on this NIC */
#define MC_CMD_ERR_NO_CLOCK 0x1015
/* Returned by MC_CMD_TESTASSERT if the action that should
@@ -7782,7 +7782,7 @@
* large number (253) it is not anticipated that this will be needed in the
* near future, so can currently be ignored.
*
- * On Riverhead this command is implemented as a a wrapper for `list` in the
+ * On Riverhead this command is implemented as a wrapper for `list` in the
* sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
@@ -7827,7 +7827,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for
+ * On Riverhead this command is implemented as a wrapper for
* `get_descriptions` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
@@ -7876,7 +7876,7 @@
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
- * On Riverhead this command is implemented as a a wrapper for `get_readings`
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
* in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
@@ -16682,7 +16682,7 @@
* TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
* contains all modes implemented in firmware for a particular board. Modes
* listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES
* should be considered hidden (not to be exposed in userland tools) and for
* engineering use only. There are no other semantic differences and any mode
* listed in either MODES or ENGINEERING_MODES can be set on the board.
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index c4a97fbf4672..ff7bbc325952 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -838,7 +838,7 @@ enum efx_xdp_tx_queues_mode {
* @xdp_channel_offset: Offset of zeroth channel used for XPD TX.
* @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
* @rx_ip_align: RX DMA address offset to have IP header aligned in
- * in accordance with NET_IP_ALIGN
+ * accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 3f241e6c881a..fc9f0189f285 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -10,7 +10,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_mac)
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
@@ -21,7 +21,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
u8 qos, __be16 vlan_proto)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_vlan) {
if ((vlan & ~VLAN_VID_MASK) ||
@@ -40,7 +40,7 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_spoofchk)
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
@@ -51,7 +51,7 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_get_vf_config)
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
@@ -62,7 +62,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
if (efx->type->sriov_set_vf_link_state)
return efx->type->sriov_set_vf_link_state(efx, vf_i,
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
new file mode 100644
index 000000000000..0c0aeb91f500
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc.h"
+#include "mae.h"
+#include "ef100_rep.h"
+#include "efx.h"
+
+static void efx_tc_free_action_set(struct efx_nic *efx,
+ struct efx_tc_action_set *act, bool in_hw)
+{
+ /* Failure paths calling this on the 'running action' set in_hw=false,
+ * because if the alloc had succeeded we'd've put it in acts.list and
+ * not still have it in act.
+ */
+ if (in_hw) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ /* in_hw is true iff we are on an acts.list; make sure to
+ * remove ourselves from that list before we are freed.
+ */
+ list_del(&act->list);
+ }
+ kfree(act);
+}
+
+static void efx_tc_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts,
+ bool in_hw)
+{
+ struct efx_tc_action_set *act, *next;
+
+ /* Failure paths set in_hw=false, because usually the acts didn't get
+ * to efx_mae_alloc_action_set_list(); if they did, the failure tree
+ * has a separate efx_mae_free_action_set_list() before calling us.
+ */
+ if (in_hw)
+ efx_mae_free_action_set_list(efx, acts);
+ /* Any act that's on the list will be in_hw even if the list isn't */
+ list_for_each_entry_safe(act, next, &acts->list, list)
+ efx_tc_free_action_set(efx, act, true);
+ /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
+}
+
+static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
+{
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
+ u32 eg_port, struct efx_tc_flow_rule *rule)
+{
+ struct efx_tc_action_set_list *acts = &rule->acts;
+ struct efx_tc_match *match = &rule->match;
+ struct efx_tc_action_set *act;
+ int rc;
+
+ match->value.ingress_port = ing_port;
+ match->mask.ingress_port = ~0;
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (!act)
+ return -ENOMEM;
+ act->deliver = 1;
+ act->dest_mport = eg_port;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc)
+ goto fail1;
+ EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
+ list_add_tail(&act->list, &acts->list);
+ rc = efx_mae_alloc_action_set_list(efx, acts);
+ if (rc)
+ goto fail2;
+ rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
+ acts->fw_id, &rule->fw_id);
+ if (rc)
+ goto fail3;
+ return 0;
+fail3:
+ efx_mae_free_action_set_list(efx, acts);
+fail2:
+ list_del(&act->list);
+ efx_mae_free_action_set(efx, act->fw_id);
+fail1:
+ kfree(act);
+ return rc;
+}
+
+static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_uplink(efx, &ing_port);
+ efx_mae_mport_wire(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_wire(efx, &ing_port);
+ efx_mae_mport_uplink(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
+{
+ struct efx_tc_flow_rule *rule = &efv->dflt;
+ struct efx_nic *efx = efv->parent;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_mport(efx, efv->mport, &ing_port);
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule)
+{
+ if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
+ efx_tc_delete_rule(efx, rule);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+{
+ u32 rep_mport_label;
+ int rc;
+
+ rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
+ efx->tc->reps_mport_id, rep_mport_label);
+ /* Use mport *selector* as vport ID */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &efx->tc->reps_mport_vport_id);
+ return 0;
+}
+
+static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
+{
+ efx_mae_free_mport(efx, efx->tc->reps_mport_id);
+ efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
+}
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx)
+{
+ struct efx_filter_spec promisc, allmulti;
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+ if (!efx->tc)
+ return 0;
+ efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_uc_def(&promisc);
+ efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &promisc, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_uc = rc;
+ efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_mc_def(&allmulti);
+ efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &allmulti, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_mc = rc;
+ return 0;
+}
+
+void efx_tc_remove_rep_filters(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return;
+ if (!efx->tc)
+ return;
+ if (efx->tc->reps_filter_mc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
+ efx->tc->reps_filter_mc = -1;
+ if (efx->tc->reps_filter_uc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
+ efx->tc->reps_filter_uc = -1;
+}
+
+int efx_init_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_tc_configure_default_rule_pf(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_default_rule_wire(efx);
+ if (rc)
+ return rc;
+ return efx_tc_configure_rep_mport(efx);
+}
+
+void efx_fini_tc(struct efx_nic *efx)
+{
+ /* We can get called even if efx_init_struct_tc() failed */
+ if (!efx->tc)
+ return;
+ efx_tc_deconfigure_rep_mport(efx);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+}
+
+int efx_init_struct_tc(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
+ if (!efx->tc)
+ return -ENOMEM;
+
+ efx->tc->reps_filter_uc = -1;
+ efx->tc->reps_filter_mc = -1;
+ INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
+ efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+ efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ return 0;
+}
+
+void efx_fini_struct_tc(struct efx_nic *efx)
+{
+ if (!efx->tc)
+ return;
+
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ kfree(efx->tc);
+ efx->tc = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
new file mode 100644
index 000000000000..309123c6b386
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_H
+#define EFX_TC_H
+#include "net_driver.h"
+
+struct efx_tc_action_set {
+ u16 deliver:1;
+ u32 dest_mport;
+ u32 fw_id; /* index of this entry in firmware actions table */
+ struct list_head list;
+};
+
+struct efx_tc_match_fields {
+ /* L1 */
+ u32 ingress_port;
+};
+
+struct efx_tc_match {
+ struct efx_tc_match_fields value;
+ struct efx_tc_match_fields mask;
+};
+
+struct efx_tc_action_set_list {
+ struct list_head list;
+ u32 fw_id;
+};
+
+struct efx_tc_flow_rule {
+ struct efx_tc_match match;
+ struct efx_tc_action_set_list acts;
+ u32 fw_id;
+};
+
+enum efx_tc_rule_prios {
+ EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
+ EFX_TC_PRIO__NUM
+};
+
+/**
+ * struct efx_tc_state - control plane data for TC offload
+ *
+ * @reps_mport_id: MAE port allocated for representor RX
+ * @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
+ * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
+ * @reps_mport_vport_id: vport_id for representor RX filters
+ * @dflt: Match-action rules for default switching; at priority
+ * %EFX_TC_PRIO_DFLT. Named by *ingress* port
+ * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
+ * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ */
+struct efx_tc_state {
+ u32 reps_mport_id, reps_mport_vport_id;
+ s32 reps_filter_uc, reps_filter_mc;
+ struct {
+ struct efx_tc_flow_rule pf;
+ struct efx_tc_flow_rule wire;
+ } dflt;
+};
+
+struct efx_rep;
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule);
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx);
+void efx_tc_remove_rep_filters(struct efx_nic *efx);
+
+int efx_init_tc(struct efx_nic *efx);
+void efx_fini_tc(struct efx_nic *efx);
+
+int efx_init_struct_tc(struct efx_nic *efx);
+void efx_fini_struct_tc(struct efx_nic *efx);
+
+#endif /* EFX_TC_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 138bca611341..d12474042c84 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -512,7 +512,7 @@ unlock:
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
unsigned index, type;
@@ -559,6 +559,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
bool finished = false;
@@ -580,7 +581,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
/* Need to check the flag before dequeueing. */
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -589,7 +591,7 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
- EFX_WARN_ON_PARANOID(pkts_compl != 1);
+ EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
efx_xmit_done_check_empty(tx_queue);
}
@@ -609,7 +611,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
- struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data;
unsigned tc, num_tc;
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 658ea2d34070..67e789b96c43 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -109,9 +109,11 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
++tx_queue->read_count;
}
@@ -146,7 +148,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -164,9 +167,15 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->flags & EFX_TX_BUF_SKB) {
struct sk_buff *skb = (struct sk_buff *)buffer->skb;
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
+ if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
+ EFX_WARN_ON_PARANOID(!efv_pkts_compl);
+ (*efv_pkts_compl)++;
+ } else {
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
+
if (tx_queue->timestamping &&
(tx_queue->completed_timestamp_major ||
tx_queue->completed_timestamp_minor)) {
@@ -199,7 +208,8 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -218,7 +228,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
+ efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -241,15 +252,17 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
- if (pkts_compl > 1)
+ if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
@@ -274,6 +287,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
+ unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
@@ -282,7 +296,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
+ &efv_pkts_compl);
}
}
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index bbab7f248250..d87aecbc7bf1 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -19,7 +19,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
- unsigned int *bytes_compl);
+ unsigned int *bytes_compl,
+ unsigned int *efv_pkts_compl);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 929cfc22cd0c..31ff35174034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -91,6 +91,9 @@ config DWMAC_IPQ806X
acceleration features available on this SoC. Network devices
will behave like standard non-accelerated ethernet interfaces.
+ Select the QCOM_SOCINFO config flag to enable specific dwmac
+ fixup based on the ipq806x SoC revision.
+
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d2cdc02d9f94..2e8744ac6b91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- priv->dma_rx_size) *
+ priv->dma_conf.dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
((tx_q->dirty_tx + 1) %
- priv->dma_tx_size))
+ priv->dma_conf.dma_tx_size))
* sizeof(struct dma_desc)));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 3fe720c5dc9f..52f9ed8db9c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
priv->plat->mdio_bus_data->xpcs_an_inband = false;
} else {
priv->plat->max_speed = 1000;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
priv->plat->mdio_bus_data->xpcs_an_inband = true;
}
}
@@ -449,6 +448,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct fwnode_handle *fwnode;
char clk_name[20];
int ret;
int i;
@@ -567,12 +567,42 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Use the last Rx queue */
plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+ /* For fixed-link setup, we allow phy-mode setting */
+ fwnode = dev_fwnode(&pdev->dev);
+ if (fwnode) {
+ int phy_mode;
+
+ /* "phy-mode" setting is optional. If it is set,
+ * we allow either sgmii or 1000base-x for now.
+ */
+ phy_mode = fwnode_get_phy_mode(fwnode);
+ if (phy_mode >= 0) {
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+ plat->phy_interface = phy_mode;
+ else
+ dev_warn(&pdev->dev, "Invalid phy-mode\n");
+ }
+ }
+
/* Intel mgbe SGMII interface uses pcs-xcps */
- if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
plat->mdio_bus_data->has_xpcs = true;
plat->mdio_bus_data->xpcs_an_inband = true;
}
+ /* For fixed-link setup, we clear xpcs_an_inband */
+ if (fwnode) {
+ struct fwnode_handle *fixed_node;
+
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node)
+ plat->mdio_bus_data->xpcs_an_inband = false;
+
+ fwnode_handle_put(fixed_node);
+ }
+
/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index f7dc8458cde8..e888c8a9c830 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -27,6 +27,8 @@
#include <linux/stmmac.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
#include "stmmac_platform.h"
@@ -64,6 +66,17 @@
#define NSS_COMMON_CLK_DIV_SGMII_100 4
#define NSS_COMMON_CLK_DIV_SGMII_10 49
+#define QSGMII_PCS_ALL_CH_CTL 0x80
+#define QSGMII_PCS_CH_SPEED_FORCE BIT(1)
+#define QSGMII_PCS_CH_SPEED_10 0x0
+#define QSGMII_PCS_CH_SPEED_100 BIT(2)
+#define QSGMII_PCS_CH_SPEED_1000 BIT(3)
+#define QSGMII_PCS_CH_SPEED_MASK (QSGMII_PCS_CH_SPEED_FORCE | \
+ QSGMII_PCS_CH_SPEED_10 | \
+ QSGMII_PCS_CH_SPEED_100 | \
+ QSGMII_PCS_CH_SPEED_1000)
+#define QSGMII_PCS_CH_SPEED_SHIFT(x) ((x) * 4)
+
#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
@@ -75,11 +88,20 @@
#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
#define QSGMII_PHY_QSGMII_EN BIT(7)
-#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
-#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
-#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
-#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
-#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_DEEMPHASIS_LVL_MASK GENMASK(11, 10)
+#define QSGMII_PHY_DEEMPHASIS_LVL(x) FIELD_PREP(QSGMII_PHY_DEEMPHASIS_LVL_MASK, (x))
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK GENMASK(14, 12)
+#define QSGMII_PHY_PHASE_LOOP_GAIN(x) FIELD_PREP(QSGMII_PHY_PHASE_LOOP_GAIN_MASK, (x))
+#define QSGMII_PHY_RX_DC_BIAS_MASK GENMASK(19, 18)
+#define QSGMII_PHY_RX_DC_BIAS(x) FIELD_PREP(QSGMII_PHY_RX_DC_BIAS_MASK, (x))
+#define QSGMII_PHY_RX_INPUT_EQU_MASK GENMASK(21, 20)
+#define QSGMII_PHY_RX_INPUT_EQU(x) FIELD_PREP(QSGMII_PHY_RX_INPUT_EQU_MASK, (x))
+#define QSGMII_PHY_CDR_PI_SLEW_MASK GENMASK(23, 22)
+#define QSGMII_PHY_CDR_PI_SLEW(x) FIELD_PREP(QSGMII_PHY_CDR_PI_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_SLEW_MASK GENMASK(27, 26)
+#define QSGMII_PHY_TX_SLEW(x) FIELD_PREP(QSGMII_PHY_TX_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_DRV_AMP_MASK GENMASK(31, 28)
+#define QSGMII_PHY_TX_DRV_AMP(x) FIELD_PREP(QSGMII_PHY_TX_DRV_AMP_MASK, (x))
struct ipq806x_gmac {
struct platform_device *pdev;
@@ -242,6 +264,113 @@ static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
ipq806x_gmac_set_speed(gmac, speed);
}
+static int
+ipq806x_gmac_configure_qsgmii_pcs_speed(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn;
+ int link_speed;
+ int val = 0;
+ int ret;
+
+ /* Some bootloaders may apply a wrong configuration and leave the
+ * port non-functional. If a fixed link is not set, reset the
+ * force-speed bit.
+ */
+ if (!of_phy_is_fixed_link(pdev->dev.of_node))
+ goto write;
+
+ dn = of_get_child_by_name(pdev->dev.of_node, "fixed-link");
+ ret = of_property_read_u32(dn, "speed", &link_speed);
+ of_node_put(dn);
+ if (ret) {
+ dev_err(dev, "found fixed-link node with no speed");
+ return ret;
+ }
+
+ val = QSGMII_PCS_CH_SPEED_FORCE;
+
+ switch (link_speed) {
+ case SPEED_1000:
+ val |= QSGMII_PCS_CH_SPEED_1000;
+ break;
+ case SPEED_100:
+ val |= QSGMII_PCS_CH_SPEED_100;
+ break;
+ case SPEED_10:
+ val |= QSGMII_PCS_CH_SPEED_10;
+ break;
+ }
+
+write:
+ regmap_update_bits(gmac->qsgmii_csr, QSGMII_PCS_ALL_CH_CTL,
+ QSGMII_PCS_CH_SPEED_MASK <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id),
+ val <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id));
+
+ return 0;
+}
+
+static const struct soc_device_attribute ipq806x_gmac_soc_v1[] = {
+ {
+ .revision = "1.*",
+ },
+ {
+ /* sentinel */
+ }
+};
+
+static int
+ipq806x_gmac_configure_qsgmii_params(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ u32 qsgmii_param;
+
+ switch (gmac->id) {
+ case 1:
+ soc = soc_device_match(ipq806x_gmac_soc_v1);
+
+ if (soc)
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xc) |
+ QSGMII_PHY_TX_SLEW(0x2) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x2);
+ else
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xd) |
+ QSGMII_PHY_TX_SLEW(0x0) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x0);
+
+ qsgmii_param |= QSGMII_PHY_RX_DC_BIAS(0x2);
+ break;
+ case 2:
+ case 3:
+ qsgmii_param = QSGMII_PHY_RX_DC_BIAS(0x3) |
+ QSGMII_PHY_TX_DRV_AMP(0xc);
+ break;
+ default: /* gmac 0 can't be set in SGMII mode */
+ dev_err(dev, "gmac id %d can't be in SGMII mode", gmac->id);
+ return -EINVAL;
+ }
+
+ /* Common params across all gmac id */
+ qsgmii_param |= QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ QSGMII_PHY_PHASE_LOOP_GAIN(0x4) |
+ QSGMII_PHY_RX_INPUT_EQU(0x1) |
+ QSGMII_PHY_CDR_PI_SLEW(0x2);
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ qsgmii_param);
+
+ return 0;
+}
+
static int ipq806x_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -328,17 +457,13 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
- regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
- QSGMII_PHY_CDR_EN |
- QSGMII_PHY_RX_FRONT_EN |
- QSGMII_PHY_RX_SIGNAL_DETECT_EN |
- QSGMII_PHY_TX_DRIVER_EN |
- QSGMII_PHY_QSGMII_EN |
- 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
- 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
- 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
- 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
- 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ err = ipq806x_gmac_configure_qsgmii_params(gmac);
+ if (err)
+ goto err_remove_config_dt;
+
+ err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac);
+ if (err)
+ goto err_remove_config_dt;
}
plat_dat->has_gmac = true;
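/* Editorial note, not part of the patch: the QSGMII PCS speed-force field added
 * above is a four-bit group per GMAC inside QSGMII_PCS_ALL_CH_CTL, located at
 * bit offset (gmac->id * 4). Forcing 1000 Mbps on gmac id 2, for example, gives
 *
 *   mask = QSGMII_PCS_CH_SPEED_MASK << QSGMII_PCS_CH_SPEED_SHIFT(2)
 *        = 0xe << 8 = 0x0e00
 *   val  = (QSGMII_PCS_CH_SPEED_FORCE | QSGMII_PCS_CH_SPEED_1000) << 8
 *        = 0xa << 8 = 0x0a00
 *
 * and regmap_update_bits() rewrites only those four bits, so the other channels
 * are untouched. Without a fixed-link node, val stays 0 and the force bit is
 * cleared in case the bootloader left it set.
 */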
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a57b0fa815ab..ea4910ae0921 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -197,7 +197,7 @@ static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
MMC_CNTRL, value);
}
-/* To mask all all interrupts.*/
+/* To mask all interrupts.*/
static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 8ad900949dc8..2b5b17d8b8a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
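/* Editorial note, not part of the patch: in ring mode a 16 KiB buffer is split
 * across DES2 and DES3, each half covering BUF_SIZE_8KiB, which is why the
 * refill above only programs DES3 (as DES2 + BUF_SIZE_8KiB) when the buffer
 * size is BUF_SIZE_16KiB. The check simply moves from priv->dma_buf_sz to
 * priv->dma_conf.dma_buf_sz.
 */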
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f9e83964aa7e..bdbf86cb102a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -188,6 +188,18 @@ struct stmmac_rfs_entry {
int tc;
};
+struct stmmac_dma_conf {
+ unsigned int dma_buf_sz;
+
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -201,7 +213,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int dma_buf_sz;
unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -213,13 +224,7 @@ struct stmmac_priv {
int (*hwif_quirks)(struct stmmac_priv *priv);
struct mutex lock;
- /* RX Queue */
- struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
- unsigned int dma_rx_size;
-
- /* TX Queue */
- struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- unsigned int dma_tx_size;
+ struct stmmac_dma_conf dma_conf;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
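/* Editorial note, not part of the patch: with stmmac_dma_conf the ring sizes,
 * buffer size and per-queue state now live behind a single member, so queue
 * lookups throughout the driver become
 *
 *   struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
 *   struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 *
 * and the allocation/init helpers below gain a struct stmmac_dma_conf *
 * argument, which lets a candidate configuration be built and filled before it
 * replaces the live one (see stmmac_setup_dma_desc() later in this patch).
 */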
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9c3055ee2608..d6a44d53fe08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -485,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = DMA_MAX_RX_SIZE;
ring->tx_max_pending = DMA_MAX_TX_SIZE;
- ring->rx_pending = priv->dma_rx_size;
- ring->tx_pending = priv->dma_tx_size;
+ ring->rx_pending = priv->dma_conf.dma_rx_size;
+ ring->tx_pending = priv->dma_conf.dma_tx_size;
}
static int stmmac_set_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c5f33630e771..070b5ef165eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,8 +74,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -130,6 +130,9 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
@@ -231,7 +234,7 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
/* synchronize_rcu() needed for pending XDP buffers to drain */
for (queue = 0; queue < rx_queues_cnt; queue++) {
- rx_q = &priv->rx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
if (rx_q->xsk_pool) {
synchronize_rcu();
break;
@@ -357,13 +360,13 @@ static void print_pkt(unsigned char *buf, int len)
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 avail;
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -375,13 +378,13 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
*/
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 dirty;
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -409,7 +412,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* check if all TX queues have the work finished */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
return -EBUSY; /* still unfinished work */
@@ -1119,18 +1122,20 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct device_node *node;
+ struct fwnode_handle *fwnode;
int ret;
- node = priv->plat->phylink_node;
+ fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
- if (node)
- ret = phylink_of_phy_connect(priv->phylink, node, 0);
+ if (fwnode)
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
- if (!node || ret) {
+ if (!fwnode || ret) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1218,7 +1223,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
return 0;
}
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int desc_size;
@@ -1227,7 +1233,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue);
@@ -1240,12 +1246,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
}
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
}
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
unsigned int desc_size;
@@ -1254,7 +1261,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue);
@@ -1269,18 +1276,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
}
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Display RX ring */
- stmmac_display_rx_rings(priv);
+ stmmac_display_rx_rings(priv, dma_conf);
/* Display TX ring */
- stmmac_display_tx_rings(priv);
+ stmmac_display_tx_rings(priv, dma_conf);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1304,44 +1312,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
/**
* stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: this function is called to clear the RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
/* Clear the RX descriptors */
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
}
/**
* stmmac_clear_tx_descriptors - clear tx descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index.
* Description: this function is called to clear the TX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
/* Clear the TX descriptors */
- for (i = 0; i < priv->dma_tx_size; i++) {
- int last = (i == (priv->dma_tx_size - 1));
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1358,10 +1372,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/**
* stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* Description: this function is called to clear the TX and RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1369,16 +1385,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
/* Clear the RX descriptors */
for (queue = 0; queue < rx_queue_cnt; queue++)
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
/* Clear the TX descriptors */
for (queue = 0; queue < tx_queue_cnt; queue++)
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @p: descriptor pointer
* @i: descriptor index
* @flags: gfp flag
@@ -1386,10 +1403,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -1418,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
return 0;
@@ -1427,12 +1446,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_free_rx_buffer - free RX dma buffers
* @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
* @i: buffer index.
*/
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+ struct stmmac_rx_queue *rx_q,
+ int i)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
@@ -1447,12 +1467,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* stmmac_free_tx_buffer - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @i: buffer index.
*/
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, int i)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1491,23 +1514,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, rx_q, i);
}
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct dma_desc *p;
int ret;
@@ -1516,7 +1544,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
else
p = rx_q->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
queue);
if (ret)
return ret;
@@ -1530,14 +1558,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
/**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
@@ -1548,12 +1579,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
}
}
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
@@ -1588,22 +1621,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
/**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
@@ -1630,32 +1666,31 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
/* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only.
*/
- stmmac_alloc_rx_buffers_zc(priv, queue);
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
} else {
- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
if (ret < 0)
return -ENOMEM;
}
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
-
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
+ dma_conf->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
+ dma_conf->dma_rx_size, 0);
}
return 0;
}
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
@@ -1667,7 +1702,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"SKB addresses:\nskb\t\tskb data\tdma data\n");
for (queue = 0; queue < rx_count; queue++) {
- ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
if (ret)
goto err_init_rx_buffers;
}
@@ -1676,12 +1711,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
err_init_rx_buffers:
while (queue >= 0) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
@@ -1695,14 +1730,17 @@ err_init_rx_buffers:
/**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
netif_dbg(priv, probe, priv->dev,
@@ -1714,16 +1752,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
+ dma_conf->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
+ dma_conf->dma_tx_size, 0);
}
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1742,16 +1780,11 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff[i] = NULL;
}
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
-
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
-
return 0;
}
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt;
@@ -1760,7 +1793,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++)
- __init_dma_tx_desc_rings(priv, queue);
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
return 0;
}
@@ -1768,26 +1801,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
+ * @dma_conf: structure to take the dma data
* @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_rx_desc_rings(dev, flags);
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
if (ret)
return ret;
- ret = init_dma_tx_desc_rings(dev);
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, dma_conf);
if (netif_msg_hw(priv))
- stmmac_display_rings(priv);
+ stmmac_display_rings(priv, dma_conf);
return ret;
}
@@ -1795,17 +1831,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
tx_q->xsk_frames_done = 0;
- for (i = 0; i < priv->dma_tx_size; i++)
- stmmac_free_tx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1824,34 +1863,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++)
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
}
/**
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1863,29 +1905,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
page_pool_destroy(rx_q->page_pool);
}
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++)
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
}
/**
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, dma_conf, queue);
if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc);
@@ -1898,7 +1944,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
addr = tx_q->dma_tx;
}
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1906,28 +1952,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
kfree(tx_q->tx_skbuff);
}
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++)
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
}
/**
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
@@ -1939,8 +1989,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = priv->dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
@@ -1955,7 +2005,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return ret;
}
- rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
@@ -1963,7 +2013,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -1972,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -1997,7 +2047,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
@@ -2005,7 +2056,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2013,7 +2064,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2021,28 +2072,31 @@ err_dma:
/**
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -2055,7 +2109,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
else
size = sizeof(struct dma_desc);
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2072,7 +2126,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2080,7 +2135,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2088,27 +2143,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
/**
* alloc_dma_desc_resources - alloc TX/RX resources.
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* RX Allocation */
- int ret = alloc_dma_rx_desc_resources(priv);
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
if (ret)
return ret;
- ret = alloc_dma_tx_desc_resources(priv);
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2116,16 +2173,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
/**
* free_dma_desc_resources - free dma desc resources
* @priv: private structure
+ * @dma_conf: structure to take the dma data
*/
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Release the DMA TX socket buffers */
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
/* Release the DMA RX socket buffers later
* to ensure all pending XDP_TX buffers are returned.
*/
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
}
/**
@@ -2299,7 +2358,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
@@ -2314,7 +2373,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
chan);
}
}
@@ -2330,7 +2389,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2405,7 +2464,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
@@ -2446,7 +2505,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
*/
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
@@ -2459,7 +2518,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
entry = tx_q->dirty_tx;
/* Try to clean all TX complete frame in 1 shot */
- while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
struct xdp_frame *xdpf;
struct sk_buff *skb;
struct dma_desc *p;
@@ -2561,7 +2620,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2626,17 +2685,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
*/
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
- dma_free_tx_skbufs(priv, chan);
- stmmac_clear_tx_descriptors(priv, chan);
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
+ stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
@@ -2696,8 +2752,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir);
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
@@ -2863,7 +2919,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -2877,7 +2933,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -2892,7 +2948,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
@@ -2942,7 +2998,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
@@ -2964,12 +3020,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (priv->dma_tx_size - 1), chan);
+ (priv->dma_conf.dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (priv->dma_rx_size - 1), chan);
+ (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
@@ -3304,7 +3360,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
/* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3326,7 +3382,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
@@ -3370,7 +3426,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL);
- free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
}
}
irq_idx = priv->plat->rx_queues_to_use;
@@ -3379,7 +3435,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL);
- free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
}
}
@@ -3514,7 +3570,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
- 0, int_name, &priv->rx_queue[i]);
+ 0, int_name, &priv->dma_conf.rx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
@@ -3539,7 +3595,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
- 0, int_name, &priv->tx_queue[i]);
+ 0, int_name, &priv->dma_conf.tx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
@@ -3626,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev)
}
/**
- * stmmac_open - open entry point of the driver
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
+ * @priv: driver private structure
+ * @mtu: MTU to setup the dma queue and buf with
+ * Description: Allocate and generate a dma_conf based on the provided MTU.
+ * Allocate the Tx/Rx DMA queues and initialize them.
+ * Return value:
+ * the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+ struct stmmac_dma_conf *dma_conf;
+ int chan, bfsize, ret;
+
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+ if (!dma_conf) {
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(mtu, 0);
+
+ dma_conf->dma_buf_sz = bfsize;
+ /* Choose the tx/rx size from the one already defined in the
+ * priv struct, if any.
+ */
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+ if (!dma_conf->dma_tx_size)
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!dma_conf->dma_rx_size)
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv, dma_conf);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto alloc_error;
+ }
+
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ return dma_conf;
+
+init_error:
+ free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+ kfree(dma_conf);
+ return ERR_PTR(ret);
+}
+
+/**
+ * __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
+ * @dma_conf: structure to take the dma data
* Description:
* This function is the open entry point of the driver.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_open(struct net_device *dev)
+static int __stmmac_open(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
- int bfsize = 0;
u32 chan;
int ret;
@@ -3663,45 +3793,12 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- if (bfsize < 0)
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
- priv->dma_buf_sz = bfsize;
- buf_sz = bfsize;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- if (!priv->dma_tx_size)
- priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- if (!priv->dma_rx_size)
- priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
-
- /* Earlier check for TBS */
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+ buf_sz = dma_conf->dma_buf_sz;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- }
-
- ret = alloc_dma_desc_resources(priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- __func__);
- goto dma_desc_error;
- }
-
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- __func__);
- goto init_error;
- }
+ stmmac_reset_queues_param(priv);
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
@@ -3729,18 +3826,32 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
-dma_desc_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
return ret;
}
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_dma_conf *dma_conf;
+ int ret;
+
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+ if (IS_ERR(dma_conf))
+ return PTR_ERR(dma_conf);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ return ret;
+}
+
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
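/* Editorial sketch, not part of the patch: stmmac_open() is now split into
 * "build a DMA configuration for the current MTU" (stmmac_setup_dma_desc) and
 * "bring the interface up with it" (__stmmac_open). One plausible use of that
 * split is rebuilding the rings without risking the running setup; the helper
 * below is purely hypothetical and does not exist in this series:
 */
static int example_rebuild_rings(struct net_device *dev, unsigned int mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	/* Allocate the candidate rings first; on failure the device keeps
	 * running with its current configuration.
	 */
	dma_conf = stmmac_setup_dma_desc(priv, mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	stmmac_release(dev);			/* frees the old priv->dma_conf rings */
	ret = __stmmac_open(dev, dma_conf);	/* copies *dma_conf into priv */
	kfree(dma_conf);			/* only the container; rings live on in priv */
	return ret;
}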
@@ -3762,8 +3873,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- netif_tx_disable(dev);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -3773,7 +3882,9 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ netif_tx_disable(dev);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -3787,7 +3898,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -3831,7 +3942,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true;
}
@@ -3849,7 +3960,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
@@ -3860,7 +3971,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3888,7 +3999,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int desc_size;
if (likely(priv->extend_desc))
@@ -3950,7 +4061,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des;
int i;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -3958,7 +4069,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
hdr = sizeof(struct udphdr);
} else {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
hdr = tcp_hdrlen(skb);
}
@@ -3990,7 +4101,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -4102,7 +4213,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4190,7 +4301,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, first_tx;
dma_addr_t des;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4253,7 +4364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -4324,7 +4435,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -4439,7 +4550,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -4493,7 +4604,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -4521,12 +4632,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
/* First descriptor and last descriptor and not split header */
- return min_t(unsigned int, priv->dma_buf_sz, plen);
+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
@@ -4542,7 +4653,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
/* Not last descriptor */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
@@ -4553,7 +4664,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4616,7 +4727,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
@@ -4790,7 +4901,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
@@ -4833,7 +4944,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
if (rx_desc) {
@@ -4848,7 +4959,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
@@ -4870,7 +4981,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -4917,7 +5028,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5038,7 +5149,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
@@ -5051,7 +5162,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5065,7 +5176,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -5109,7 +5220,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5244,7 +5355,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
@@ -5256,7 +5367,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
@@ -5440,18 +5551,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ struct stmmac_dma_conf *dma_conf;
const int mtu = new_mtu;
+ int ret;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use;
- if (netif_running(dev)) {
- netdev_err(priv->dev, "must be stopped to change its MTU\n");
- return -EBUSY;
- }
-
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
return -EINVAL;
@@ -5463,8 +5571,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = mtu;
+ if (netif_running(dev)) {
+ netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
+ /* Try to allocate the new DMA conf with the new mtu */
+ dma_conf = stmmac_setup_dma_desc(priv, mtu);
+ if (IS_ERR(dma_conf)) {
+ netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
+ mtu);
+ return PTR_ERR(dma_conf);
+ }
+ stmmac_release(dev);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ if (ret) {
+ netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
+ return ret;
+ }
+
+ stmmac_set_rx_mode(dev);
+ }
+
+ dev->mtu = mtu;
netdev_update_features(dev);
return 0;
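The reworked stmmac_change_mtu() above no longer returns -EBUSY on a running interface; instead it allocates a complete replacement DMA configuration sized for the new MTU first and only tears the interface down once that allocation has succeeded, so a failed resize leaves the old setup usable. A small self-contained sketch of that allocate-before-teardown pattern (generic userspace C, names hypothetical, not the stmmac API):

#include <stdio.h>
#include <stdlib.h>

struct ring_conf {              /* stand-in for the driver's DMA configuration */
	size_t buf_sz;
	unsigned char *bufs;
};

static struct ring_conf *conf_alloc(size_t buf_sz)
{
	struct ring_conf *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	c->buf_sz = buf_sz;
	c->bufs = malloc(buf_sz);
	if (!c->bufs) {
		free(c);
		return NULL;
	}
	return c;
}

static void conf_free(struct ring_conf *c)
{
	if (!c)
		return;
	free(c->bufs);
	free(c);
}

/* Resize while "running": build the new configuration first and swap only on
 * success, so failure leaves the current configuration untouched.
 */
static int change_mtu(struct ring_conf **active, size_t new_buf_sz)
{
	struct ring_conf *fresh = conf_alloc(new_buf_sz);

	if (!fresh)
		return -1;          /* old *active is still valid and in use */
	conf_free(*active);         /* the "stmmac_release()" step */
	*active = fresh;            /* reopen with the new configuration */
	return 0;
}

int main(void)
{
	struct ring_conf *active = conf_alloc(1500);

	if (!active || change_mtu(&active, 9000))
		return 1;
	printf("buf_sz now %zu\n", active->buf_sz);
	conf_free(active);
	return 0;
}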
@@ -5698,11 +5827,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = tx_q->queue_index;
struct stmmac_priv *priv;
int status;
- priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5728,10 +5859,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = rx_q->queue_index;
struct stmmac_priv *priv;
- priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
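With the per-queue state now embedded in priv->dma_conf rather than directly in priv, the MSI handlers above recover the private structure in two container_of() steps: from the queue back to the embedding dma_conf, then from dma_conf back to the embedding priv. A self-contained sketch of that nested recovery (plain userspace C; the struct names are placeholders, not the stmmac definitions):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel macro of the same name. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct txq { int queue_index; };

struct dma_conf { struct txq tx_queue[4]; };

struct priv {
	int id;
	struct dma_conf dma_conf;
};

int main(void)
{
	struct priv p = { .id = 42 };
	struct txq *q = &p.dma_conf.tx_queue[2];   /* what the IRQ handler gets */

	/* Step 1: queue -> embedding dma_conf (the driver indexes tx_queue[]
	 * with the runtime channel number; a constant keeps this demo portable).
	 * Step 2: dma_conf -> embedding priv.
	 */
	struct dma_conf *dc = container_of(q, struct dma_conf, tx_queue[2]);
	struct priv *owner = container_of(dc, struct priv, dma_conf);

	printf("recovered id=%d (expect 42)\n", owner->id);
	return 0;
}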
@@ -5762,10 +5895,10 @@ static void stmmac_poll_controller(struct net_device *dev)
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
@@ -5944,34 +6077,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
return 0;
for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
seq_printf(seq, "RX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
}
}
for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
seq_printf(seq, "TX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
}
}
@@ -6305,31 +6438,32 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
int ret;
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}
- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) {
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
return;
}
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_reset_rx_queue(priv, queue);
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
@@ -6346,7 +6480,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6367,30 +6501,31 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}
- ret = __init_dma_tx_desc_rings(priv, queue);
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) {
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_reset_tx_queue(priv, queue);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
@@ -6418,7 +6553,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -6427,7 +6562,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6452,14 +6587,14 @@ int stmmac_xdp_open(struct net_device *dev)
u32 chan;
int ret;
- ret = alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__);
@@ -6477,7 +6612,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -6495,7 +6630,7 @@ int stmmac_xdp_open(struct net_device *dev)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6504,7 +6639,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -6537,11 +6672,11 @@ int stmmac_xdp_open(struct net_device *dev)
irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
return ret;
}
@@ -6564,8 +6699,8 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- rx_q = &priv->rx_queue[queue];
- tx_q = &priv->tx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
@@ -6820,8 +6955,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
if (netif_running(dev))
stmmac_release(dev);
- priv->dma_rx_size = rx_size;
- priv->dma_tx_size = tx_size;
+ priv->dma_conf.dma_rx_size = rx_size;
+ priv->dma_conf.dma_tx_size = tx_size;
if (netif_running(dev))
ret = stmmac_open(dev);
@@ -7268,7 +7403,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -7317,6 +7452,25 @@ int stmmac_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+}
+
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
/**
* stmmac_reset_queues_param - reset queue parameters
* @priv: device pointer
@@ -7327,22 +7481,11 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
- for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
- }
-
- for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-
- tx_q->cur_tx = 0;
- tx_q->dirty_tx = 0;
- tx_q->mss = 0;
+ for (queue = 0; queue < rx_cnt; queue++)
+ stmmac_reset_rx_queue(priv, queue);
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
- }
+ for (queue = 0; queue < tx_cnt; queue++)
+ stmmac_reset_tx_queue(priv, queue);
}
/**
@@ -7402,7 +7545,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 03d3d1f7aa4b..5f177ea80725 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -434,9 +434,11 @@ int stmmac_mdio_register(struct net_device *ndev)
int err = 0;
struct mii_bus *new_bus;
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
+ struct fwnode_handle *fixed_node;
int addr, found, max_addr;
if (!mdio_bus_data)
@@ -490,6 +492,18 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->plat->has_xgmac)
stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+ /* If fixed-link is set, skip PHY scanning */
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ if (fwnode) {
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node) {
+ fwnode_handle_put(fixed_node);
+ goto bus_register_done;
+ }
+ }
+
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 2fc51dc5eb0b..49af7e78b7f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
struct stmmac_channel *ch = &priv->channel[i];
u32 tail;
- tail = priv->rx_queue[i].dma_rx_phy +
- (priv->dma_rx_size * sizeof(struct dma_desc));
+ tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
+ (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
@@ -1680,7 +1680,7 @@ cleanup:
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
struct stmmac_packet_attrs attr = { };
- int size = priv->dma_buf_sz;
+ int size = priv->dma_conf.dma_buf_sz;
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
/* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break;
if (i >= priv->plat->tx_queues_to_use)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index d61766eeac6d..773e415cc2de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL;
if (qopt->enable)
- priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else
- priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 435dc00d04e5..0b08b0e085e8 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -29,7 +29,7 @@
* -- on page reclamation, the driver swaps the page with a spare page.
* if that page is still in use, it frees its reference to that page,
* and allocates a new page for use. otherwise, it just recycles the
- * the page.
+ * page.
*
* NOTE: cassini can parse the header. however, it's not worth it
* as long as the network stack requires a header copy.
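The cassini header comment above describes its page-reclamation scheme: on reclamation the driver swaps in a spare page and, only if the reclaimed page is still referenced elsewhere, drops its reference and allocates a replacement. A small self-contained sketch of that still-referenced check (generic refcounted buffers in userspace C, not the driver's struct page handling):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	int refcount;
	unsigned char *data;
};

static struct buf *buf_alloc(size_t size)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->data = malloc(size);
	if (!b->data) {
		free(b);
		return NULL;
	}
	b->refcount = 1;
	return b;
}

static void buf_put(struct buf *b)
{
	if (b && --b->refcount == 0) {
		free(b->data);
		free(b);
	}
}

/* Reclamation step: if nobody else holds the buffer, recycle it in place;
 * otherwise drop our reference and hand back a freshly allocated one.
 */
static struct buf *reclaim(struct buf *b, size_t size)
{
	if (b->refcount == 1)
		return b;               /* recycle */
	buf_put(b);                     /* still in use elsewhere */
	return buf_alloc(size);         /* replace with a new buffer */
}

int main(void)
{
	struct buf *b = buf_alloc(4096);
	struct buf *in_flight;

	if (!b)
		return 1;
	b->refcount++;          /* a second user (e.g. the stack) still holds it */
	in_flight = b;
	b = reclaim(b, 4096);   /* must allocate a replacement */
	if (!b) {
		buf_put(in_flight);
		return 1;
	}
	printf("replaced: %s\n", b != in_flight ? "yes" : "no");
	buf_put(in_flight);     /* the other user eventually drops its reference */
	buf_put(b);
	return 0;
}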
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index ae5f05f03f88..2d91f4936d52 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -764,7 +764,7 @@
* PAUSE thresholds defined in terms of FIFO occupancy and may be translated
* into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
* when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
- * value is is 0x6F.
+ * value is 0x6F.
* DEFAULT: 0x00078
*/
#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 6b59b14e74b1..0cd8493b810f 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -335,7 +335,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->tsolen = 0;
/* Mark the port as belonging to ldmvsw which directs the
- * the common code to use the net_device in the vnet_port
+ * common code to use the net_device in the vnet_port
* rather than the net_device in the vnet (which is used
* by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE
* macro.
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 45bd89153de2..a14591b41acb 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1088,7 +1088,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
/* netif_stop_queue() must be done before checking
- * checking tx index in TX_BUFFS_AVAIL() below, because
+ * tx index in TX_BUFFS_AVAIL() below, because
* in gem_tx(), we update tx_old before checking for
* netif_queue_stopped().
*/
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 3773ce5e12cc..546206640492 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -494,7 +494,7 @@ static int spl2sw_probe(struct platform_device *pdev)
/* Add and enable napi. */
netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT);
napi_enable(&comm->rx_napi);
- netif_napi_add(ndev, &comm->tx_napi, spl2sw_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
return 0;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index d435519236e4..e54ce73396ee 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -81,7 +81,7 @@ static int xlgmac_prep_tso(struct sk_buff *skb,
if (ret)
return ret;
- pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->header_len = skb_tcp_all_headers(skb);
pkt_info->tcp_header_len = tcp_hdrlen(skb);
pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
pkt_info->mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index c02a9654dce6..ffdac6fac054 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -938,7 +938,7 @@ enum velocity_owner {
#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR
ignore MIBFI,RACEI to
reduce intr. frequency
- NOTE.... do not enable NoBuf int mask at driver driver
+ NOTE.... do not enable NoBuf int mask at driver
when (1) NoBuf -> RxThreshold = SF
(2) OK -> RxThreshold = original value
*/
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
new file mode 100644
index 000000000000..baa1f0a5cc37
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wangxun network device configuration
+#
+
+config NET_VENDOR_WANGXUN
+ bool "Wangxun devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Wangxun cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_WANGXUN
+
+config TXGBE
+ tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) 10GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called txgbe.
+
+endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
new file mode 100644
index 000000000000..c34db1bead25
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Wangxun network device drivers.
+#
+
+obj-$(CONFIG_TXGBE) += txgbe/
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
new file mode 100644
index 000000000000..431303ca75b4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_TXGBE) += txgbe.o
+
+txgbe-objs := txgbe_main.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
new file mode 100644
index 000000000000..38ddbde0ed0f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_H_
+#define _TXGBE_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_MAX_FDIR_INDICES 63
+
+#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct txgbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char txgbe_driver_name[];
+
+#endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
new file mode 100644
index 000000000000..d3b9f73ecba4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "txgbe.h"
+
+char txgbe_driver_name[] = "txgbe";
+
+/* txgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void txgbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake = false; /* nothing sets this yet; default to no wake */
+
+ txgbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * txgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct txgbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ txgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct txgbe_adapter),
+ TXGBE_MAX_TX_QUEUES,
+ TXGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver txgbe_driver = {
+ .name = txgbe_driver_name,
+ .id_table = txgbe_pci_tbl,
+ .probe = txgbe_probe,
+ .remove = txgbe_remove,
+ .shutdown = txgbe_shutdown,
+};
+
+module_pci_driver(txgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
new file mode 100644
index 000000000000..b2e329f50bae
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_TYPE_H_
+#define _TXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ txgbe_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define TXGBE_DEV_ID_SP1000 0x1001
+#define TXGBE_DEV_ID_WX1820 0x2001
+
+/* Subsystem IDs */
+/* SFP */
+#define TXGBE_ID_SP1000_SFP 0x0000
+#define TXGBE_ID_WX1820_SFP 0x2000
+#define TXGBE_ID_SFP 0x00
+
+/* copper */
+#define TXGBE_ID_SP1000_XAUI 0x1010
+#define TXGBE_ID_WX1820_XAUI 0x2010
+#define TXGBE_ID_XAUI 0x10
+#define TXGBE_ID_SP1000_SGMII 0x1020
+#define TXGBE_ID_WX1820_SGMII 0x2020
+#define TXGBE_ID_SGMII 0x20
+/* backplane */
+#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030
+#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030
+#define TXGBE_ID_KR_KX_KX4 0x30
+/* MAC Interface */
+#define TXGBE_ID_SP1000_MAC_XAUI 0x1040
+#define TXGBE_ID_WX1820_MAC_XAUI 0x2040
+#define TXGBE_ID_MAC_XAUI 0x40
+#define TXGBE_ID_SP1000_MAC_SGMII 0x1060
+#define TXGBE_ID_WX1820_MAC_SGMII 0x2060
+#define TXGBE_ID_MAC_SGMII 0x60
+
+#define TXGBE_NCSI_SUP 0x8000
+#define TXGBE_NCSI_MASK 0x8000
+#define TXGBE_WOL_SUP 0x4000
+#define TXGBE_WOL_MASK 0x4000
+#define TXGBE_DEV_MASK 0xf0
+
+/* Combined interface */
+#define TXGBE_ID_SFI_XAUI 0x50
+
+/* Revision ID */
+#define TXGBE_SP_MPW 1
+
+#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 48f544f6c999..2772a79cd3ed 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -106,7 +106,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
- * into MWD register. The the MCR register is then appropriately setup
+ * into MWD register. The MCR register is then appropriately setup
* to finish the write operation.
*/
static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 89770c2e0ffb..3591b9edc9a1 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,6 +29,7 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
@@ -156,7 +157,7 @@ struct eth_plat_info {
u8 phy; /* MII PHY ID, 0 - 31 */
u8 rxq; /* configurable, currently 0 - 31 only */
u8 txreadyq;
- u8 hwaddr[6];
+ u8 hwaddr[ETH_ALEN];
u8 npe; /* NPE instance used by this interface */
bool has_mdio; /* If this instance has an MDIO bus */
};
@@ -1387,6 +1388,7 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
struct of_phandle_args npe_spec;
struct device_node *mdio_np;
struct eth_plat_info *plat;
+ u8 mac[ETH_ALEN];
int ret;
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
@@ -1428,6 +1430,12 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
}
plat->txreadyq = queue_spec.args[0];
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(plat->hwaddr, mac, ETH_ALEN);
+ }
+
return plat;
}
@@ -1487,7 +1495,10 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- eth_hw_addr_set(ndev, plat->hwaddr);
+ if (is_valid_ether_addr(plat->hwaddr))
+ eth_hw_addr_set(ndev, plat->hwaddr);
+ else
+ eth_hw_addr_random(ndev);
platform_set_drvdata(pdev, ndev);