Diffstat (limited to 'drivers')
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 9
-rw-r--r-- drivers/net/arcnet/com20020_cs.c | 2
-rw-r--r-- drivers/net/bareudp.c | 13
-rw-r--r-- drivers/net/bonding/bond_main.c | 174
-rw-r--r-- drivers/net/bonding/bond_options.c | 55
-rw-r--r-- drivers/net/can/Makefile | 7
-rw-r--r-- drivers/net/can/at91_can.c | 4
-rw-r--r-- drivers/net/can/c_can/c_can.c | 4
-rw-r--r-- drivers/net/can/cc770/cc770.c | 4
-rw-r--r-- drivers/net/can/dev.c | 1338
-rw-r--r-- drivers/net/can/dev/Makefile | 11
-rw-r--r-- drivers/net/can/dev/bittiming.c | 261
-rw-r--r-- drivers/net/can/dev/dev.c | 467
-rw-r--r-- drivers/net/can/dev/length.c | 90
-rw-r--r-- drivers/net/can/dev/netlink.c | 379
-rw-r--r-- drivers/net/can/dev/rx-offload.c (renamed from drivers/net/can/rx-offload.c) | 5
-rw-r--r-- drivers/net/can/dev/skb.c | 231
-rw-r--r-- drivers/net/can/flexcan.c | 130
-rw-r--r-- drivers/net/can/grcan.c | 4
-rw-r--r-- drivers/net/can/ifi_canfd/ifi_canfd.c | 4
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 6
-rw-r--r-- drivers/net/can/m_can/Makefile | 4
-rw-r--r-- drivers/net/can/m_can/m_can.c | 8
-rw-r--r-- drivers/net/can/m_can/tcan4x5x-core.c (renamed from drivers/net/can/m_can/tcan4x5x.c) | 122
-rw-r--r-- drivers/net/can/m_can/tcan4x5x-regmap.c | 135
-rw-r--r-- drivers/net/can/m_can/tcan4x5x.h | 57
-rw-r--r-- drivers/net/can/mscan/mscan.c | 4
-rw-r--r-- drivers/net/can/pch_can.c | 4
-rw-r--r-- drivers/net/can/peak_canfd/peak_canfd.c | 4
-rw-r--r-- drivers/net/can/rcar/rcar_can.c | 4
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 4
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 4
-rw-r--r-- drivers/net/can/softing/softing_main.c | 4
-rw-r--r-- drivers/net/can/spi/hi311x.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 4
-rw-r--r-- drivers/net/can/sun4i_can.c | 4
-rw-r--r-- drivers/net/can/ti_hecc.c | 4
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 4
-rw-r--r-- drivers/net/can/usb/esd_usb2.c | 4
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 4
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 2
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 2
-rw-r--r-- drivers/net/can/usb/mcba_usb.c | 4
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4
-rw-r--r-- drivers/net/can/usb/ucan.c | 4
-rw-r--r-- drivers/net/can/usb/usb_8dev.c | 4
-rw-r--r-- drivers/net/can/xilinx_can.c | 6
-rw-r--r-- drivers/net/dsa/Kconfig | 2
-rw-r--r-- drivers/net/dsa/Makefile | 1
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 113
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 16
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 38
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 10
-rw-r--r-- drivers/net/dsa/bcm_sf2_regs.h | 1
-rw-r--r-- drivers/net/dsa/dsa_loop.c | 74
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.c | 45
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 12
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 103
-rw-r--r-- drivers/net/dsa/microchip/ksz8795.c | 78
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 98
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 25
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 8
-rw-r--r-- drivers/net/dsa/mt7530.c | 54
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 456
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 8
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 17
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 21
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 5
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 276
-rw-r--r-- drivers/net/dsa/ocelot/felix.h | 2
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 37
-rw-r--r-- drivers/net/dsa/ocelot/seville_vsc9953.c | 20
-rw-r--r-- drivers/net/dsa/qca/ar9331.c | 165
-rw-r--r-- drivers/net/dsa/qca8k.c | 38
-rw-r--r-- drivers/net/dsa/realtek-smi-core.h | 9
-rw-r--r-- drivers/net/dsa/rtl8366.c | 152
-rw-r--r-- drivers/net/dsa/rtl8366rb.c | 3
-rw-r--r-- drivers/net/dsa/sja1105/sja1105.h | 3
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_devlink.c | 9
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 101
-rw-r--r-- drivers/net/dsa/xrs700x/Kconfig | 26
-rw-r--r-- drivers/net/dsa/xrs700x/Makefile | 4
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.c | 622
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.h | 42
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_i2c.c | 150
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_mdio.c | 163
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_reg.h | 203
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 10
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 186
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 82
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 37
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 114
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 59
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 59
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/unimac.h | 68
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_core.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 3
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 38
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c | 3
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 133
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 13
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 135
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 17
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.c | 93
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.h | 9
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 61
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 181
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 18
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 7
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 32
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 14
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 35
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 33
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 64
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 54
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 26
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 133
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 153
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 579
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 106
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 1592
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 167
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 1640
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 182
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 85
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 56
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 165
-rw-r--r-- drivers/net/ethernet/micrel/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.h | 2
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_common.c | 114
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_par.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_spi.c | 2
-rw-r--r-- drivers/net/ethernet/mscc/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 50
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.h | 10
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_devlink.c | 885
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_net.c | 277
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 143
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 14
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 15
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 14
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 83
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 49
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 196
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 397
-rw-r--r-- drivers/net/ethernet/renesas/ravb.h | 37
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 6
-rw-r--r-- drivers/net/ethernet/rocker/rocker.h | 6
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 61
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 43
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 10
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 10
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 91
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 96
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-qos.c | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpts.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 18
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 7
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 18
-rw-r--r-- drivers/net/ethernet/ti/cpsw_switchdev.c | 70
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 8
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 18
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 3
-rw-r--r-- drivers/net/geneve.c | 19
-rw-r--r-- drivers/net/gtp.c | 527
-rw-r--r-- drivers/net/hyperv/netvsc.c | 40
-rw-r--r-- drivers/net/hyperv/netvsc_bpf.c | 14
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 32
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 171
-rw-r--r-- drivers/net/ipa/Kconfig | 10
-rw-r--r-- drivers/net/ipa/gsi.c | 140
-rw-r--r-- drivers/net/ipa/gsi_trans.h | 1
-rw-r--r-- drivers/net/ipa/ipa_clock.c | 194
-rw-r--r-- drivers/net/ipa/ipa_data-sc7180.c | 38
-rw-r--r-- drivers/net/ipa/ipa_data-sdm845.c | 38
-rw-r--r-- drivers/net/ipa/ipa_data.h | 26
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 4
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/mhi_net.c | 15
-rw-r--r-- drivers/net/netdevsim/netdev.c | 2
-rw-r--r-- drivers/net/phy/at803x.c | 84
-rw-r--r-- drivers/net/phy/bcm7xxx.c | 2
-rw-r--r-- drivers/net/phy/mdio_bus.c | 4
-rw-r--r-- drivers/net/phy/micrel.c | 2
-rw-r--r-- drivers/net/phy/national.c | 2
-rw-r--r-- drivers/net/phy/phy.c | 2
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/phy/sfp-bus.c | 20
-rw-r--r-- drivers/net/phy/sfp.c | 91
-rw-r--r-- drivers/net/ppp/pptp.c | 8
-rw-r--r-- drivers/net/tap.c | 12
-rw-r--r-- drivers/net/tun.c | 20
-rw-r--r-- drivers/net/usb/hso.c | 3
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 3
-rw-r--r-- drivers/net/veth.c | 14
-rw-r--r-- drivers/net/virtio_net.c | 18
-rw-r--r-- drivers/net/vxlan.c | 20
-rw-r--r-- drivers/net/wan/ixp4xx_hss.c | 4
-rw-r--r-- drivers/net/wan/sbni.c | 2
-rw-r--r-- drivers/net/xen-netback/common.h | 3
-rw-r--r-- drivers/net/xen-netback/interface.c | 4
-rw-r--r-- drivers/net/xen-netback/netback.c | 5
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 4
-rw-r--r-- drivers/net/xen-netfront.c | 16
-rw-r--r-- drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 115
-rw-r--r-- drivers/vhost/net.c | 30
271 files changed, 11173 insertions, 7112 deletions
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index b8e6acdf932e..8af978bd0000 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -840,6 +840,15 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * We know that the device has an endpoint because we verified by
+ * interrogating the firmware. This is the case when the device was not
+ * yet discovered by the fsl-mc bus, thus the lookup returned NULL.
+ * Differentiate this case by returning EPROBE_DEFER.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPROBE_DEFER);
+
return endpoint;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index cf607ffcf358..81223f6bebcc 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -67,7 +67,7 @@ static void regdump(struct net_device *dev)
/* set up the address register */
count = 0;
arcnet_outb((count >> 8) | RDDATAflag | AUTOINCflag,
- ioaddr, com20020_REG_W_ADDR_HI);
+ ioaddr, COM20020_REG_W_ADDR_HI);
arcnet_outb(count & 0xff, ioaddr, COM20020_REG_W_ADDR_LO);
for (count = 0; count < 256 + 32; count++) {
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85de5f96c02b..1b8f59713fc7 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -532,11 +532,12 @@ static void bareudp_setup(struct net_device *dev)
dev->netdev_ops = &bareudp_netdev_ops;
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -658,7 +659,6 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct bareudp_conf conf;
- LIST_HEAD(list_kill);
int err;
err = bareudp2info(data, &conf, extack);
@@ -676,8 +676,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
return 0;
err_unconfig:
- bareudp_dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
+ bareudp_dellink(dev, NULL);
return err;
}
@@ -729,7 +728,6 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
- LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@@ -753,8 +751,7 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name,
return dev;
err:
- bareudp_dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
+ bareudp_dellink(dev, NULL);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5fe5232cc3f3..74cbbb22470b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -83,6 +83,9 @@
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+#include <net/tls.h>
+#endif
#include "bonding_priv.h"
@@ -164,7 +167,7 @@ module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
"2 for layer 2+3, 3 for encap layer 2+3, "
- "4 for encap layer 3+4");
+ "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
@@ -301,6 +304,19 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
return dev_queue_xmit(skb);
}
+bool bond_sk_check(struct bonding *bond)
+{
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
/*---------------------------------- VLAN -----------------------------------*/
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -1212,6 +1228,13 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (bond_sk_check(bond))
+ features |= BOND_TLS_FEATURES;
+ else
+ features &= ~BOND_TLS_FEATURES;
+#endif
+
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
@@ -1434,6 +1457,8 @@ static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
return NETDEV_LAG_HASH_E23;
case BOND_XMIT_POLICY_ENCAP34:
return NETDEV_LAG_HASH_E34;
+ case BOND_XMIT_POLICY_VLAN_SRCMAC:
+ return NETDEV_LAG_HASH_VLAN_SRCMAC;
default:
return NETDEV_LAG_HASH_UNKNOWN;
}
@@ -1922,6 +1947,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_unregister;
}
+ bond_lower_state_changed(new_slave);
+
res = bond_sysfs_slave_add(new_slave);
if (res) {
slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
@@ -3494,6 +3521,27 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
return true;
}
+static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u32 srcmac_vendor = 0, srcmac_dev = 0;
+ u16 vlan;
+ int i;
+
+ for (i = 0; i < 3; i++)
+ srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
+
+ for (i = 3; i < ETH_ALEN; i++)
+ srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
+
+ if (!skb_vlan_tag_present(skb))
+ return srcmac_vendor ^ srcmac_dev;
+
+ vlan = skb_vlan_tag_get(skb);
+
+ return vlan ^ srcmac_vendor ^ srcmac_dev;
+}
+
/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
struct flow_keys *fk)
@@ -3501,10 +3549,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
int noff, proto = -1;
- if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+ switch (bond->params.xmit_policy) {
+ case BOND_XMIT_POLICY_ENCAP23:
+ case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
fk, NULL, 0, 0, 0, 0);
+ default:
+ break;
}
fk->ports.ports = 0;
@@ -3539,6 +3591,16 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
return true;
}
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+{
+ hash ^= (__force u32)flow_get_u32_dst(flow) ^
+ (__force u32)flow_get_u32_src(flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+ /* discard lowest hash bit to deal with the common even ports pattern */
+ return hash >> 1;
+}
+
/**
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
@@ -3556,6 +3618,9 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return bond_vlan_srcmac_hash(skb);
+
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
return bond_eth_hash(skb);
@@ -3569,12 +3634,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
else
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- hash ^= (__force u32)flow_get_u32_dst(&flow) ^
- (__force u32)flow_get_u32_src(&flow);
- hash ^= (hash >> 16);
- hash ^= (hash >> 8);
- return hash >> 1;
+ return bond_ip_hash(hash, &flow);
}
/*-------------------------- Device entry points ----------------------------*/
@@ -4547,6 +4608,95 @@ static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
return NULL;
}
+static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
+{
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
+ flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
+ break;
+ }
+ fallthrough;
+#endif
+ default: /* AF_INET */
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
+ flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
+ break;
+ }
+
+ flow->ports.src = inet_sk(sk)->inet_sport;
+ flow->ports.dst = inet_sk(sk)->inet_dport;
+}
+
+/**
+ * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
+ * @sk: socket to use for headers
+ *
+ * This function will extract the necessary field from the socket and use
+ * them to generate a hash based on the LAYER34 xmit_policy.
+ * Assumes that sk is a TCP or UDP socket.
+ */
+static u32 bond_sk_hash_l34(struct sock *sk)
+{
+ struct flow_keys flow;
+ u32 hash;
+
+ bond_sk_to_flow(sk, &flow);
+
+ /* L4 */
+ memcpy(&hash, &flow.ports.ports, sizeof(hash));
+ /* L3 */
+ return bond_ip_hash(hash, &flow);
+}
+
+static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
+ struct sock *sk)
+{
+ struct bond_up_slave *slaves;
+ struct slave *slave;
+ unsigned int count;
+ u32 hash;
+
+ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (unlikely(!count))
+ return NULL;
+
+ hash = bond_sk_hash_l34(sk);
+ slave = slaves->arr[hash % count];
+
+ return slave->dev;
+}
+
+static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
+ struct sock *sk)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *lower = NULL;
+
+ rcu_read_lock();
+ if (bond_sk_check(bond))
+ lower = __bond_sk_get_lower_dev(bond, sk);
+ rcu_read_unlock();
+
+ return lower;
+}
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ return bond_tx_drop(dev, skb);
+}
+#endif
+
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@@ -4555,6 +4705,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+ return bond_tls_device_xmit(bond, skb, dev);
+#endif
+
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return bond_xmit_roundrobin(skb, dev);
@@ -4683,6 +4838,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_get_xmit_slave = bond_xmit_get_slave,
+ .ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
};
static const struct device_type bond_type = {
@@ -4754,6 +4910,10 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ if (bond_sk_check(bond))
+ bond_dev->features |= BOND_TLS_FEATURES;
+#endif
}
/* Destroy a bonding device.
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a4e4e15f574d..77d7c38bd435 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -96,12 +96,13 @@ static const struct bond_opt_value bond_pps_tbl[] = {
};
static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
- { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
- { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
- { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
- { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
- { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
- { NULL, -1, 0},
+ { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
+ { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
+ { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
+ { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
+ { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
+ { "vlan+srcmac", BOND_XMIT_POLICY_VLAN_SRCMAC, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_arp_validate_tbl[] = {
@@ -745,17 +746,30 @@ const struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
-static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
+static bool bond_set_xfrm_features(struct bonding *bond)
{
if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
- return;
+ return false;
+
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+ bond->dev->wanted_features |= BOND_XFRM_FEATURES;
+ else
+ bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
+
+ return true;
+}
+
+static bool bond_set_tls_features(struct bonding *bond)
+{
+ if (!IS_ENABLED(CONFIG_TLS_DEVICE))
+ return false;
- if (mode == BOND_MODE_ACTIVEBACKUP)
- bond_dev->wanted_features |= BOND_XFRM_FEATURES;
+ if (bond_sk_check(bond))
+ bond->dev->wanted_features |= BOND_TLS_FEATURES;
else
- bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
+ bond->dev->wanted_features &= ~BOND_TLS_FEATURES;
- netdev_update_features(bond_dev);
+ return true;
}
static int bond_option_mode_set(struct bonding *bond,
@@ -780,13 +794,20 @@ static int bond_option_mode_set(struct bonding *bond,
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
- if (bond->dev->reg_state == NETREG_REGISTERED)
- bond_set_xfrm_features(bond->dev, newval->value);
-
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ if (bond->dev->reg_state == NETREG_REGISTERED) {
+ bool update = false;
+
+ update |= bond_set_xfrm_features(bond);
+ update |= bond_set_tls_features(bond);
+
+ if (update)
+ netdev_update_features(bond->dev);
+ }
+
return 0;
}
@@ -1219,6 +1240,10 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
+ if (bond->dev->reg_state == NETREG_REGISTERED)
+ if (bond_set_tls_features(bond))
+ netdev_update_features(bond->dev);
+
return 0;
}
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 22164300122d..a2b4463d8480 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += dev.o
-can-dev-y += rx-offload.o
-
-can-dev-$(CONFIG_CAN_LEDS) += led.o
-
+obj-y += dev/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 5284f0ab3b06..9ad9b39f480e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -484,7 +484,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += cf->len;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
+ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
/*
* we have to stop the queue and deliver all messages in case
@@ -856,7 +856,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
+ can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
dev->stats.tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 63f48b016ecd..ef474bae47a1 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -476,7 +476,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
- can_put_echo_skb(skb, dev, idx);
+ can_put_echo_skb(skb, dev, idx, 0);
/* Update the active bits */
atomic_add((1 << idx), &priv->tx_active);
@@ -733,7 +733,7 @@ static void c_can_do_tx(struct net_device *dev)
pend &= ~(1 << idx);
obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
c_can_inval_tx_object(dev, IF_RX, obj);
- can_get_echo_skb(dev, idx);
+ can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
pkts++;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 8d9f332c35e0..f8a130f594e2 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -702,8 +702,8 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
stats->tx_bytes += cf->len;
stats->tx_packets++;
- can_put_echo_skb(priv->tx_skb, dev, 0);
- can_get_echo_skb(dev, 0);
+ can_put_echo_skb(priv->tx_skb, dev, 0, 0);
+ can_get_echo_skb(dev, 0, NULL);
priv->tx_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
deleted file mode 100644
index 8b1ae023cb21..000000000000
--- a/drivers/net/can/dev.c
+++ /dev/null
@@ -1,1338 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
- * Copyright (C) 2006 Andrey Volkov, Varma Electronics
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/can-ml.h>
-#include <linux/can/dev.h>
-#include <linux/can/skb.h>
-#include <linux/can/netlink.h>
-#include <linux/can/led.h>
-#include <linux/of.h>
-#include <net/rtnetlink.h>
-
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
-/* CAN DLC to real data length conversion helpers */
-
-static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
- 8, 12, 16, 20, 24, 32, 48, 64};
-
-/* get data length from raw data length code (DLC) */
-u8 can_fd_dlc2len(u8 dlc)
-{
- return dlc2len[dlc & 0x0F];
-}
-EXPORT_SYMBOL_GPL(can_fd_dlc2len);
-
-static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
- 9, 9, 9, 9, /* 9 - 12 */
- 10, 10, 10, 10, /* 13 - 16 */
- 11, 11, 11, 11, /* 17 - 20 */
- 12, 12, 12, 12, /* 21 - 24 */
- 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
- 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
- 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
- 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_fd_len2dlc(u8 len)
-{
- if (unlikely(len > 64))
- return 0xF;
-
- return len2dlc[len];
-}
-EXPORT_SYMBOL_GPL(can_fd_len2dlc);
-
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
-{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
-
- return best_sample_point;
-}
-
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800000)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500000)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
- }
-
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
- }
-
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
-
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
- return 0;
-}
-#else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- netdev_err(dev, "bit-timing calculation not available\n");
- return -EINVAL;
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
-/* Checks the validity of the specified bit-timing parameters prop_seg,
- * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
- * prescaler value brp. You can find more information in the header
- * file linux/can/netlink.h.
- */
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
-{
- struct can_priv *priv = netdev_priv(dev);
- int tseg1, alltseg;
- u64 brp64;
-
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
-
- brp64 = (u64)priv->clock.freq * (u64)bt->tq;
- if (btc->brp_inc > 1)
- do_div(brp64, btc->brp_inc);
- brp64 += 500000000UL - 1;
- do_div(brp64, 1000000000UL); /* the practicable BRP */
- if (btc->brp_inc > 1)
- brp64 *= btc->brp_inc;
- bt->brp = (u32)brp64;
-
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
- return -EINVAL;
-
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
-
- return 0;
-}
-
-/* Checks the validity of predefined bitrate settings */
-static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- for (i = 0; i < bitrate_const_cnt; i++) {
- if (bt->bitrate == bitrate_const[i])
- break;
- }
-
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
-}
-
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc,
- const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
-{
- int err;
-
- /* Depending on the given can_bittiming parameter structure the CAN
- * timing parameters are calculated based on the provided bitrate OR
- * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
- * provided directly which are then checked and fixed up.
- */
- if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
-}
-
-static void can_update_state_error_stats(struct net_device *dev,
- enum can_state new_state)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (new_state <= priv->state)
- return;
-
- switch (new_state) {
- case CAN_STATE_ERROR_WARNING:
- priv->can_stats.error_warning++;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- priv->can_stats.error_passive++;
- break;
- case CAN_STATE_BUS_OFF:
- priv->can_stats.bus_off++;
- break;
- default:
- break;
- }
-}
-
-static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_TX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_TX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return CAN_ERR_CRTL_ACTIVE;
- case CAN_STATE_ERROR_WARNING:
- return CAN_ERR_CRTL_RX_WARNING;
- case CAN_STATE_ERROR_PASSIVE:
- return CAN_ERR_CRTL_RX_PASSIVE;
- default:
- return 0;
- }
-}
-
-static const char *can_get_state_str(const enum can_state state)
-{
- switch (state) {
- case CAN_STATE_ERROR_ACTIVE:
- return "Error Active";
- case CAN_STATE_ERROR_WARNING:
- return "Error Warning";
- case CAN_STATE_ERROR_PASSIVE:
- return "Error Passive";
- case CAN_STATE_BUS_OFF:
- return "Bus Off";
- case CAN_STATE_STOPPED:
- return "Stopped";
- case CAN_STATE_SLEEPING:
- return "Sleeping";
- default:
- return "<unknown>";
- }
-
- return "<unknown>";
-}
-
-void can_change_state(struct net_device *dev, struct can_frame *cf,
- enum can_state tx_state, enum can_state rx_state)
-{
- struct can_priv *priv = netdev_priv(dev);
- enum can_state new_state = max(tx_state, rx_state);
-
- if (unlikely(new_state == priv->state)) {
- netdev_warn(dev, "%s: oops, state did not change", __func__);
- return;
- }
-
- netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
- can_get_state_str(priv->state), priv->state,
- can_get_state_str(new_state), new_state);
-
- can_update_state_error_stats(dev, new_state);
- priv->state = new_state;
-
- if (!cf)
- return;
-
- if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_BUSOFF;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= tx_state >= rx_state ?
- can_tx_state_to_frame(dev, tx_state) : 0;
- cf->data[1] |= tx_state <= rx_state ?
- can_rx_state_to_frame(dev, rx_state) : 0;
-}
-EXPORT_SYMBOL_GPL(can_change_state);
-
-/* Local echo of CAN messages
- *
- * CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.rst). To test the handling of CAN
- * interfaces that do not support the local echo both driver types are
- * implemented. In the case that the driver does not support the echo
- * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
- * to perform the echo as a fallback solution.
- */
-static void can_flush_echo_skb(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- int i;
-
- for (i = 0; i < priv->echo_skb_max; i++) {
- if (priv->echo_skb[i]) {
- kfree_skb(priv->echo_skb[i]);
- priv->echo_skb[i] = NULL;
- stats->tx_dropped++;
- stats->tx_aborted_errors++;
- }
- }
-}
-
-/* Put the skb on the stack to be looped backed locally lateron
- *
- * The function is typically called in the start_xmit function
- * of the device driver. The driver must protect access to
- * priv->echo_skb, if necessary.
- */
-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- /* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
- (skb->protocol != htons(ETH_P_CAN) &&
- skb->protocol != htons(ETH_P_CANFD))) {
- kfree_skb(skb);
- return 0;
- }
-
- if (!priv->echo_skb[idx]) {
- skb = can_create_echo_skb(skb);
- if (!skb)
- return -ENOMEM;
-
- /* make settings for echo to reduce code in irq context */
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->dev = dev;
-
- /* save this skb for tx interrupt echo handling */
- priv->echo_skb[idx] = skb;
- } else {
- /* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
- kfree_skb(skb);
- return -EBUSY;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_put_echo_skb);
-
-struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (idx >= priv->echo_skb_max) {
- netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
- __func__, idx, priv->echo_skb_max);
- return NULL;
- }
-
- if (priv->echo_skb[idx]) {
- /* Using "struct canfd_frame::len" for the frame
- * length is supported on both CAN and CANFD frames.
- */
- struct sk_buff *skb = priv->echo_skb[idx];
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
-
- /* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
-
- priv->echo_skb[idx] = NULL;
-
- return skb;
- }
-
- return NULL;
-}
-
-/* Get the skb from the stack and loop it back locally
- *
- * The function is typically called when the TX done interrupt
- * is handled in the device driver. The driver must protect
- * access to priv->echo_skb, if necessary.
- */
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct sk_buff *skb;
- u8 len;
-
- skb = __can_get_echo_skb(dev, idx, &len);
- if (!skb)
- return 0;
-
- skb_get(skb);
- if (netif_rx(skb) == NET_RX_SUCCESS)
- dev_consume_skb_any(skb);
- else
- dev_kfree_skb_any(skb);
-
- return len;
-}
-EXPORT_SYMBOL_GPL(can_get_echo_skb);
-
-/* Remove the skb from the stack and free it.
- *
- * The function is typically called when TX failed.
- */
-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- if (priv->echo_skb[idx]) {
- dev_kfree_skb_any(priv->echo_skb[idx]);
- priv->echo_skb[idx] = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(can_free_echo_skb);
-
-/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
- struct can_frame *cf;
- int err;
-
- BUG_ON(netif_carrier_ok(dev));
-
- /* No synchronization needed because the device is bus-off and
- * no messages can come in or go out.
- */
- can_flush_echo_skb(dev);
-
- /* send restart message upstream */
- skb = alloc_can_err_skb(dev, &cf);
- if (!skb)
- goto restart;
-
- cf->can_id |= CAN_ERR_RESTARTED;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
- netif_rx_ni(skb);
-
-restart:
- netdev_dbg(dev, "restarted\n");
- priv->can_stats.restarts++;
-
- /* Now restart the device */
- err = priv->do_set_mode(dev, CAN_MODE_START);
-
- netif_carrier_on(dev);
- if (err)
- netdev_err(dev, "Error %d during restart", err);
-}
-
-static void can_restart_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct can_priv *priv = container_of(dwork, struct can_priv,
- restart_work);
-
- can_restart(priv->dev);
-}
-
-int can_restart_now(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* A manual restart is only permitted if automatic restart is
- * disabled and the device is in the bus-off state
- */
- if (priv->restart_ms)
- return -EINVAL;
- if (priv->state != CAN_STATE_BUS_OFF)
- return -EBUSY;
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
-
- return 0;
-}
-
-/* CAN bus-off
- *
- * This functions should be called when the device goes bus-off to
- * tell the netif layer that no more packets can be sent or received.
- * If enabled, a timer is started to trigger bus-off recovery.
- */
-void can_bus_off(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (priv->restart_ms)
- netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
- priv->restart_ms);
- else
- netdev_info(dev, "bus-off\n");
-
- netif_carrier_off(dev);
-
- if (priv->restart_ms)
- schedule_delayed_work(&priv->restart_work,
- msecs_to_jiffies(priv->restart_ms));
-}
-EXPORT_SYMBOL_GPL(can_bus_off);
-
-static void can_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_CAN;
- dev->mtu = CAN_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct can_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cf = skb_put_zero(skb, sizeof(struct can_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_skb);
-
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd)
-{
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
- sizeof(struct canfd_frame));
- if (unlikely(!skb))
- return NULL;
-
- skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_canfd_skb);
-
-struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
-{
- struct sk_buff *skb;
-
- skb = alloc_can_skb(dev, cf);
- if (unlikely(!skb))
- return NULL;
-
- (*cf)->can_id = CAN_ERR_FLAG;
- (*cf)->len = CAN_ERR_DLC;
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(alloc_can_err_skb);
-
-/* Allocate and setup space for the CAN network device */
-struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
- unsigned int txqs, unsigned int rxqs)
-{
- struct net_device *dev;
- struct can_priv *priv;
- int size;
-
- /* We put the driver's priv, the CAN mid layer priv and the
- * echo skb into the netdevice's priv. The memory layout for
- * the netdev_priv is like this:
- *
- * +-------------------------+
- * | driver's priv |
- * +-------------------------+
- * | struct can_ml_priv |
- * +-------------------------+
- * | array of struct sk_buff |
- * +-------------------------+
- */
-
- size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
-
- if (echo_skb_max)
- size = ALIGN(size, sizeof(struct sk_buff *)) +
- echo_skb_max * sizeof(struct sk_buff *);
-
- dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
- txqs, rxqs);
- if (!dev)
- return NULL;
-
- priv = netdev_priv(dev);
- priv->dev = dev;
-
- dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
-
- if (echo_skb_max) {
- priv->echo_skb_max = echo_skb_max;
- priv->echo_skb = (void *)priv +
- (size - echo_skb_max * sizeof(struct sk_buff *));
- }
-
- priv->state = CAN_STATE_STOPPED;
-
- INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(alloc_candev_mqs);
-
-/* Free space of the CAN network device */
-void free_candev(struct net_device *dev)
-{
- free_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(free_candev);
-
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
-
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
-
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
-
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
-
- default:
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(can_change_mtu);
-
-/* Common open function when the device gets opened.
- *
- * This function should be called in the open function of the device
- * driver.
- */
-int open_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (!priv->bittiming.bitrate) {
- netdev_err(dev, "bit-timing not yet defined\n");
- return -EINVAL;
- }
-
- /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
- if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
- netdev_err(dev, "incorrect/missing data bit-timing\n");
- return -EINVAL;
- }
-
- /* Switch carrier on if device was stopped while in bus-off state */
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(open_candev);
-
-#ifdef CONFIG_OF
-/* Common function that can be used to understand the limitation of
- * a transceiver when it provides no means to determine these limitations
- * at runtime.
- */
-void of_can_transceiver(struct net_device *dev)
-{
- struct device_node *dn;
- struct can_priv *priv = netdev_priv(dev);
- struct device_node *np = dev->dev.parent->of_node;
- int ret;
-
- dn = of_get_child_by_name(np, "can-transceiver");
- if (!dn)
- return;
-
- ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
- of_node_put(dn);
- if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
- netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
-}
-EXPORT_SYMBOL_GPL(of_can_transceiver);
-#endif
-
-/* Common close function for cleanup before the device gets closed.
- *
- * This function should be called in the close function of the device
- * driver.
- */
-void close_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- cancel_delayed_work_sync(&priv->restart_work);
- can_flush_echo_skb(dev);
-}
-EXPORT_SYMBOL_GPL(close_candev);
-
-/* CAN netlink interface */
-static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
- [IFLA_CAN_STATE] = { .type = NLA_U32 },
- [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
- [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
- [IFLA_CAN_RESTART] = { .type = NLA_U32 },
- [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
- [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
- [IFLA_CAN_DATA_BITTIMING]
- = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST]
- = { .len = sizeof(struct can_bittiming_const) },
- [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
-};
-
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- bool is_can_fd = false;
-
- /* Make sure that valid CAN FD configurations always consist of
- * - nominal/arbitration bittiming
- * - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- */
-
- if (!data)
- return 0;
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
- }
-
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct can_priv *priv = netdev_priv(dev);
- int err;
-
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
-
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->bittiming_const && !priv->do_set_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt,
- priv->bittiming_const,
- priv->bitrate_const,
- priv->bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->bittiming, &bt, sizeof(bt));
-
- if (priv->do_set_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
- maskedflags = cm->flags & cm->mask;
-
- /* check whether provided bits are allowed to be passed */
- if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
-
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
-
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
-
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
-
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
- else
- dev->mtu = CAN_MTU;
- }
-
- if (data[IFLA_CAN_RESTART_MS]) {
- /* Do not allow changing restart delay while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
- }
-
- if (data[IFLA_CAN_RESTART]) {
- /* Do not allow a restart while not running */
- if (!(dev->flags & IFF_UP))
- return -EINVAL;
- err = can_restart_now(dev);
- if (err)
- return err;
- }
-
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
-
- if (data[IFLA_CAN_TERMINATION]) {
- const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
- const unsigned int num_term = priv->termination_const_cnt;
- unsigned int i;
-
- if (!priv->do_set_termination)
- return -EOPNOTSUPP;
-
- /* check whether given value is supported by the interface */
- for (i = 0; i < num_term; i++) {
- if (termval == priv->termination_const[i])
- break;
- }
- if (i >= num_term)
- return -EINVAL;
-
- /* Finally, set the termination value */
- err = priv->do_set_termination(dev, termval);
- if (err)
- return err;
-
- priv->termination = termval;
- }
-
- return 0;
-}
-
-static size_t can_get_size(const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- size_t size = 0;
-
- if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
- if (priv->termination_const) {
- size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
- size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
- priv->termination_const_cnt);
- }
- if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
- size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
-
- return size;
-}
-
-static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
- enum can_state state = priv->state;
-
- if (priv->do_get_state)
- priv->do_get_state(dev, &state);
-
- if ((priv->bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
-
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
-
- nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
- nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
-
- (priv->do_get_berr_counter &&
- !priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
-
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
-
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
-
- (priv->termination_const &&
- (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
- nla_put(skb, IFLA_CAN_TERMINATION_CONST,
- sizeof(*priv->termination_const) *
- priv->termination_const_cnt,
- priv->termination_const))) ||
-
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
-
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
-
- (nla_put(skb, IFLA_CAN_BITRATE_MAX,
- sizeof(priv->bitrate_max),
- &priv->bitrate_max))
- )
-
- return -EMSGSIZE;
-
- return 0;
-}
-
-static size_t can_get_xstats_size(const struct net_device *dev)
-{
- return sizeof(struct can_device_stats);
-}
-
-static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (nla_put(skb, IFLA_INFO_XSTATS,
- sizeof(priv->can_stats), &priv->can_stats))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
-
-static void can_dellink(struct net_device *dev, struct list_head *head)
-{
-}
-
-static struct rtnl_link_ops can_link_ops __read_mostly = {
- .kind = "can",
- .maxtype = IFLA_CAN_MAX,
- .policy = can_policy,
- .setup = can_setup,
- .validate = can_validate,
- .newlink = can_newlink,
- .changelink = can_changelink,
- .dellink = can_dellink,
- .get_size = can_get_size,
- .fill_info = can_fill_info,
- .get_xstats_size = can_get_xstats_size,
- .fill_xstats = can_fill_xstats,
-};
-
-/* Register the CAN network device */
-int register_candev(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* Ensure termination_const, termination_const_cnt and
- * do_set_termination consistency. All must be either set or
- * unset.
- */
- if ((!priv->termination_const != !priv->termination_const_cnt) ||
- (!priv->termination_const != !priv->do_set_termination))
- return -EINVAL;
-
- if (!priv->bitrate_const != !priv->bitrate_const_cnt)
- return -EINVAL;
-
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
- return -EINVAL;
-
- dev->rtnl_link_ops = &can_link_ops;
- netif_carrier_off(dev);
-
- return register_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(register_candev);
-
-/* Unregister the CAN network device */
-void unregister_candev(struct net_device *dev)
-{
- unregister_netdev(dev);
-}
-EXPORT_SYMBOL_GPL(unregister_candev);
-
-/* Test if a network device is a candev based device
- * and return the can_priv* if so.
- */
-struct can_priv *safe_candev_priv(struct net_device *dev)
-{
- if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
- return NULL;
-
- return netdev_priv(dev);
-}
-EXPORT_SYMBOL_GPL(safe_candev_priv);
-
-static __init int can_dev_init(void)
-{
- int err;
-
- can_led_notifier_init();
-
- err = rtnl_link_register(&can_link_ops);
- if (!err)
- pr_info(MOD_DESC "\n");
-
- return err;
-}
-module_init(can_dev_init);
-
-static __exit void can_dev_exit(void)
-{
- rtnl_link_unregister(&can_link_ops);
-
- can_led_notifier_exit();
-}
-module_exit(can_dev_exit);
-
-MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
new file mode 100644
index 000000000000..3e2e207861fc
--- /dev/null
+++ b/drivers/net/can/dev/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+can-dev-y += bittiming.o
+can-dev-y += dev.o
+can-dev-y += length.o
+can-dev-y += netlink.o
+can-dev-y += rx-offload.o
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_LEDS) += led.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
new file mode 100644
index 000000000000..f7fe226bb395
--- /dev/null
+++ b/drivers/net/can/dev/bittiming.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ unsigned int sample_point_nominal, unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
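+
+/* Worked example (illustrative, not part of this patch): for a nominal
+ * sample point of 87.5% (875) and tseg = 15, i.e. 16 time quanta per bit
+ * including the sync segment, the i = 0 iteration yields
+ * tseg2 = 16 - (875 * 16) / 1000 = 2 and tseg1 = 13, giving a real
+ * sample point of 1000 * 14 / 16 = 875, an exact match (assuming the
+ * controller's tseg1/tseg2 limits do not clamp these values).
+ */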
+
+int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800000)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500000)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ netdev_err(dev,
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EDOM;
+ }
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ /* check for sjw user settings */
+ if (!bt->sjw || !btc->sjw_max) {
+ bt->sjw = 1;
+ } else {
+ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+ if (bt->sjw > btc->sjw_max)
+ bt->sjw = btc->sjw_max;
+ /* bt->sjw must not be higher than tseg2 */
+ if (tseg2 < bt->sjw)
+ bt->sjw = tseg2;
+ }
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+
+ return 0;
+}
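+
+/* Worked example (illustrative): with clock.freq = 8 MHz and a requested
+ * bitrate of 500 kbit/s, the default sample point is 87.5%. One exact
+ * solution is brp = 1 with 16 time quanta per bit (tq = 125 ns):
+ * tseg1 = 13 and tseg2 = 2, which the code above splits into
+ * prop_seg = 6, phase_seg1 = 7 and phase_seg2 = 2. Whether this solution
+ * is reachable depends on the controller's can_bittiming_const.
+ */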
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+/* Checks the validity of the specified bit-timing parameters prop_seg,
+ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+ * prescaler value brp. You can find more information in the header
+ * file linux/can/netlink.h.
+ */
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int tseg1, alltseg;
+ u64 brp64;
+
+ tseg1 = bt->prop_seg + bt->phase_seg1;
+ if (!bt->sjw)
+ bt->sjw = 1;
+ if (bt->sjw > btc->sjw_max ||
+ tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+ bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+ return -ERANGE;
+
+ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+ if (btc->brp_inc > 1)
+ do_div(brp64, btc->brp_inc);
+ brp64 += 500000000UL - 1;
+ do_div(brp64, 1000000000UL); /* the practicable BRP */
+ if (btc->brp_inc > 1)
+ brp64 *= btc->brp_inc;
+ bt->brp = (u32)brp64;
+
+ if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ return -EINVAL;
+
+ alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+ bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+ bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+
+ return 0;
+}
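+
+/* Worked example (illustrative): with clock.freq = 8 MHz and tq = 125 ns,
+ * brp64 = 8000000 * 125 = 10^9, so after the rounding division by 10^9
+ * the resulting prescaler is brp = 1 (assuming brp_inc = 1).
+ */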
+
+/* Checks the validity of predefined bitrate settings */
+static int
+can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ break;
+ }
+
+ if (i >= bitrate_const_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ int err;
+
+ /* Depending on the given can_bittiming parameter structure the CAN
+ * timing parameters are calculated based on the provided bitrate OR
+ * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
+ * provided directly which are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate && btc)
+ err = can_calc_bittiming(dev, bt, btc);
+ else if (bt->tq && !bt->bitrate && btc)
+ err = can_fixup_bittiming(dev, bt, btc);
+ else if (!bt->tq && bt->bitrate && bitrate_const)
+ err = can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt);
+ else
+ err = -EINVAL;
+
+ return err;
+}
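+
+/* Illustration (not part of this patch): a netlink request carrying only
+ * "bitrate 500000" takes the can_calc_bittiming() path, a request
+ * carrying tq/prop_seg/phase_seg/sjw with bitrate left at 0 takes the
+ * can_fixup_bittiming() path, and a bitrate-only request on a device
+ * with a fixed bitrate table is checked by can_validate_bitrate().
+ */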
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
new file mode 100644
index 000000000000..01e4a194f187
--- /dev/null
+++ b/drivers/net/can/dev/dev.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/can-ml.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+#include <linux/can/led.h>
+#include <linux/of.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ priv->can_stats.bus_off++;
+ break;
+ default:
+ break;
+ }
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+}
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change\n", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (!cf)
+ return;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
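+
+/* Usage sketch (hypothetical driver, not part of this patch): an error
+ * interrupt handler typically derives the states from the error counters
+ * (warning limit 96, passive limit 128) and lets can_change_state() do
+ * the bookkeeping:
+ *
+ *   skb = alloc_can_err_skb(dev, &cf);
+ *   tx_state = txerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
+ *              txerr >= 96 ? CAN_STATE_ERROR_WARNING :
+ *              CAN_STATE_ERROR_ACTIVE;
+ *   rx_state = rxerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
+ *              rxerr >= 96 ? CAN_STATE_ERROR_WARNING :
+ *              CAN_STATE_ERROR_ACTIVE;
+ *   can_change_state(dev, skb ? cf : NULL, tx_state, rx_state);
+ */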
+
+/* CAN device restart for bus-off recovery */
+static void can_restart(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int err;
+
+ BUG_ON(netif_carrier_ok(dev));
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+ */
+ can_flush_echo_skb(dev);
+
+ /* send restart message upstream */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ goto restart;
+
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+
+ netif_rx_ni(skb);
+
+restart:
+ netdev_dbg(dev, "restarted\n");
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+
+ netif_carrier_on(dev);
+ if (err)
+ netdev_err(dev, "Error %d during restart\n", err);
+}
+
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv,
+ restart_work);
+
+ can_restart(priv->dev);
+}
+
+int can_restart_now(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* A manual restart is only permitted if automatic restart is
+ * disabled and the device is in the bus-off state
+ */
+ if (priv->restart_ms)
+ return -EINVAL;
+ if (priv->state != CAN_STATE_BUS_OFF)
+ return -EBUSY;
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_restart(dev);
+
+ return 0;
+}
+
+/* CAN bus-off
+ *
+ * This function should be called when the device goes bus-off to
+ * tell the netif layer that no more packets can be sent or received.
+ * If enabled, a timer is started to trigger bus-off recovery.
+ */
+void can_bus_off(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
+
+ netif_carrier_off(dev);
+
+ if (priv->restart_ms)
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
+}
+EXPORT_SYMBOL_GPL(can_bus_off);
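+
+/* Usage sketch (hypothetical): a driver's error handler calls this once
+ * the controller signals bus-off; MY_CHIP_BUSOFF is a made-up status bit:
+ *
+ *   if (reg_status & MY_CHIP_BUSOFF) {
+ *           netif_stop_queue(dev);
+ *           can_bus_off(dev);
+ *   }
+ */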
+
+void can_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CAN_MTU;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_HW_CSUM;
+}
+
+/* Allocate and setup space for the CAN network device */
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
+{
+ struct net_device *dev;
+ struct can_priv *priv;
+ int size;
+
+ /* We put the driver's priv, the CAN mid layer priv and the
+ * echo skb into the netdevice's priv. The memory layout for
+ * the netdev_priv is like this:
+ *
+ * +-------------------------+
+ * | driver's priv |
+ * +-------------------------+
+ * | struct can_ml_priv |
+ * +-------------------------+
+ * | array of struct sk_buff |
+ * +-------------------------+
+ */
+
+ size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+
+ if (echo_skb_max)
+ size = ALIGN(size, sizeof(struct sk_buff *)) +
+ echo_skb_max * sizeof(struct sk_buff *);
+
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+
+ dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+
+ if (echo_skb_max) {
+ priv->echo_skb_max = echo_skb_max;
+ priv->echo_skb = (void *)priv +
+ (size - echo_skb_max * sizeof(struct sk_buff *));
+ }
+
+ priv->state = CAN_STATE_STOPPED;
+
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
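+
+/* Usage sketch (hypothetical driver): most drivers go through the
+ * alloc_candev() wrapper from <linux/can/dev.h>, which expands to
+ * alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1). struct can_priv
+ * must be the first member of the driver's private structure:
+ *
+ *   struct my_priv {
+ *           struct can_priv can;
+ *           void __iomem *regs;
+ *   };
+ *
+ *   dev = alloc_candev(sizeof(struct my_priv), 4);
+ *   if (!dev)
+ *           return -ENOMEM;
+ */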
+
+/* Free space of the CAN network device */
+void free_candev(struct net_device *dev)
+{
+ free_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(free_candev);
+
+/* changing MTU and control mode for CAN/CANFD devices */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* allow change of MTU according to the CANFD ability of the device */
+ switch (new_mtu) {
+ case CAN_MTU:
+ /* 'CANFD-only' controllers cannot switch to CAN_MTU */
+ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ return -EINVAL;
+
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ break;
+
+ case CANFD_MTU:
+ /* check for potential CANFD ability */
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ return -EINVAL;
+
+ priv->ctrlmode |= CAN_CTRLMODE_FD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
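+
+/* Usage sketch: drivers hook this helper into their net_device_ops so
+ * that e.g. "ip link set can0 mtu 72" toggles CAN FD mode (72 is
+ * CANFD_MTU); the ops structure below is hypothetical:
+ *
+ *   static const struct net_device_ops my_netdev_ops = {
+ *           .ndo_open       = my_open,
+ *           .ndo_stop       = my_close,
+ *           .ndo_start_xmit = my_start_xmit,
+ *           .ndo_change_mtu = can_change_mtu,
+ *   };
+ */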
+
+/* Common open function when the device gets opened.
+ *
+ * This function should be called in the open function of the device
+ * driver.
+ */
+int open_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.bitrate) {
+ netdev_err(dev, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->data_bittiming.bitrate ||
+ priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
+ /* Switch carrier on if device was stopped while in bus-off state */
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(open_candev);
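+
+/* Usage sketch (hypothetical): call this first in ndo_open, before
+ * touching the hardware:
+ *
+ *   static int my_open(struct net_device *dev)
+ *   {
+ *           int err = open_candev(dev);
+ *
+ *           if (err)
+ *                   return err;
+ *
+ *           return my_chip_start(dev);
+ *   }
+ */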
+
+#ifdef CONFIG_OF
+/* Common function that can be used to determine the limitations of a
+ * transceiver from the device tree when it provides no means to
+ * determine them at runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ of_node_put(dn);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
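+
+/* Example device tree fragment (illustrative) limiting a CAN node to a
+ * 5 Mbit/s transceiver:
+ *
+ *   can-transceiver {
+ *           max-bitrate = <5000000>;
+ *   };
+ */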
+#endif
+
+/* Common close function for cleanup before the device gets closed.
+ *
+ * This function should be called in the close function of the device
+ * driver.
+ */
+void close_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_flush_echo_skb(dev);
+}
+EXPORT_SYMBOL_GPL(close_candev);
+
+/* Register the CAN network device */
+int register_candev(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ return -EINVAL;
+
+ dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
+ return register_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(register_candev);
+
+/* Unregister the CAN network device */
+void unregister_candev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_candev);
+
+/* Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
+static __init int can_dev_init(void)
+{
+ int err;
+
+ can_led_notifier_init();
+
+ err = can_netlink_register();
+ if (!err)
+ pr_info(MOD_DESC "\n");
+
+ return err;
+}
+module_init(can_dev_init);
+
+static __exit void can_dev_exit(void)
+{
+ can_netlink_unregister();
+
+ can_led_notifier_exit();
+}
+module_exit(can_dev_exit);
+
+MODULE_ALIAS_RTNL_LINK("can");
diff --git a/drivers/net/can/dev/length.c b/drivers/net/can/dev/length.c
new file mode 100644
index 000000000000..d35c4e82314d
--- /dev/null
+++ b/drivers/net/can/dev/length.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2012, 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ */
+
+#include <linux/can/dev.h>
+
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64
+};
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc)
+{
+ return dlc2len[dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_fd_dlc2len);
+
+static const u8 len2dlc[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+};
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len)
+{
+ if (len >= ARRAY_SIZE(len2dlc))
+ return CANFD_MAX_DLC;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_fd_len2dlc);
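+
+/* Examples (from the tables above): a 12 byte payload maps to DLC 9, a
+ * 24 byte payload to DLC 12, and anything above 48 bytes to
+ * CANFD_MAX_DLC (15), which can_fd_dlc2len() maps back to 64 bytes.
+ */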
+
+/**
+ * can_skb_get_frame_len() - Calculate the CAN Frame length in bytes
+ * of a given skb.
+ * @skb: socket buffer of a CAN message.
+ *
+ * Do a rough calculation: bit stuffing is ignored and length in bits
+ * is rounded up to a length in bytes.
+ *
+ * Rationale: this function is to be used for the BQL functions
+ * (netdev_sent_queue() and netdev_completed_queue()) which expect a
+ * value in bytes. Just using skb->len is insufficient because it will
+ * return the constant value of CAN(FD)_MTU. Doing the bit stuffing
+ * calculation would be too expensive in terms of computing resources
+ * for no noticeable gain.
+ *
+ * Remarks: The payload of CAN FD frames with the BRS flag is sent at a
+ * different bitrate. Currently, the can-utils canbusload tool does not
+ * yet support CAN FD, so we could not run any benchmark to measure the
+ * impact. There might be room for improvement here.
+ *
+ * Return: length in bytes.
+ */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
+{
+ const struct canfd_frame *cf = (const struct canfd_frame *)skb->data;
+ u8 len;
+
+ if (can_is_canfd_skb(skb))
+ len = canfd_sanitize_len(cf->len);
+ else if (cf->can_id & CAN_RTR_FLAG)
+ len = 0;
+ else
+ len = cf->len;
+
+ if (can_is_canfd_skb(skb)) {
+ if (cf->can_id & CAN_EFF_FLAG)
+ len += CANFD_FRAME_OVERHEAD_EFF;
+ else
+ len += CANFD_FRAME_OVERHEAD_SFF;
+ } else {
+ if (cf->can_id & CAN_EFF_FLAG)
+ len += CAN_FRAME_OVERHEAD_EFF;
+ else
+ len += CAN_FRAME_OVERHEAD_SFF;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
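+
+/* Example (illustrative): a classic CAN SFF data frame with an 8 byte
+ * payload accounts for 8 + CAN_FRAME_OVERHEAD_SFF bytes on the wire,
+ * while a CAN FD EFF frame with a 64 byte payload accounts for
+ * 64 + CANFD_FRAME_OVERHEAD_EFF bytes (the overhead constants are
+ * defined alongside these helpers).
+ */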
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
new file mode 100644
index 000000000000..3ae884cdf677
--- /dev/null
+++ b/drivers/net/can/dev/netlink.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+#include <net/rtnetlink.h>
+
+static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ [IFLA_CAN_STATE] = { .type = NLA_U32 },
+ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING]
+ = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+};
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ bool is_can_fd = false;
+
+ /* Make sure that valid CAN FD configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_FD set
+ */
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ }
+
+ if (is_can_fd) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass the bitrate
+ * directly via do_set_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->bittiming, &bt, sizeof(bt));
+
+ if (priv->do_set_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic;
+ u32 maskedflags;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = priv->ctrlmode_static;
+ maskedflags = cm->flags & cm->mask;
+
+ /* check whether provided bits are allowed to be passed */
+ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+ return -EOPNOTSUPP;
+
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ /* make sure static options are provided by configuration */
+ if ((maskedflags & ctrlstatic) != ctrlstatic)
+ return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+ else
+ dev->mtu = CAN_MTU;
+ }
+
+ if (data[IFLA_CAN_RESTART_MS]) {
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ }
+
+ if (data[IFLA_CAN_RESTART]) {
+ /* Do not allow a restart while not running */
+ if (!(dev->flags & IFF_UP))
+ return -EINVAL;
+ err = can_restart_now(dev);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming dbt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * data_bittiming_const if set, otherwise pass the bitrate
+ * directly via do_set_data_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+ sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt,
+ priv->data_bittiming_const,
+ priv->data_bitrate_const,
+ priv->data_bitrate_const_cnt);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+ if (priv->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination)
+ return -EOPNOTSUPP;
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
+ return 0;
+}
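+
+/* Userspace view (illustrative): the attributes handled above correspond
+ * to iproute2 commands such as:
+ *
+ *   ip link set can0 type can bitrate 500000
+ *   ip link set can0 type can bitrate 500000 dbitrate 2000000 fd on
+ *   ip link set can0 type can restart-ms 100
+ *   ip link set can0 type can restart
+ */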
+
+static size_t can_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size = 0;
+
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt);
+ size += nla_total_size(sizeof(priv->bitrate_max)); /* IFLA_CAN_BITRATE_MAX */
+
+ return size;
+}
+
+static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec;
+ enum can_state state = priv->state;
+
+ if (priv->do_get_state)
+ priv->do_get_state(dev, &state);
+
+ if ((priv->bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming)) ||
+
+ (priv->bittiming_const &&
+ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
+ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
+ (priv->do_get_berr_counter &&
+ !priv->do_get_berr_counter(dev, &bec) &&
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ (priv->data_bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+ (priv->data_bittiming_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ sizeof(*priv->data_bittiming_const),
+ priv->data_bittiming_const)) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ (priv->bitrate_const &&
+ nla_put(skb, IFLA_CAN_BITRATE_CONST,
+ sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt,
+ priv->bitrate_const)) ||
+
+ (priv->data_bitrate_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt,
+ priv->data_bitrate_const)) ||
+
+ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max),
+ &priv->bitrate_max))
+ )
+
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t can_get_xstats_size(const struct net_device *dev)
+{
+ return sizeof(struct can_device_stats);
+}
+
+static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (nla_put(skb, IFLA_INFO_XSTATS,
+ sizeof(priv->can_stats), &priv->can_stats))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int can_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+}
+
+struct rtnl_link_ops can_link_ops __read_mostly = {
+ .kind = "can",
+ .maxtype = IFLA_CAN_MAX,
+ .policy = can_policy,
+ .setup = can_setup,
+ .validate = can_validate,
+ .newlink = can_newlink,
+ .changelink = can_changelink,
+ .dellink = can_dellink,
+ .get_size = can_get_size,
+ .fill_info = can_fill_info,
+ .get_xstats_size = can_get_xstats_size,
+ .fill_xstats = can_fill_xstats,
+};
+
+int can_netlink_register(void)
+{
+ return rtnl_link_register(&can_link_ops);
+}
+
+void can_netlink_unregister(void)
+{
+ rtnl_link_unregister(&can_link_ops);
+}
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 3c1912c0430b..ab2c1543786c 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -263,7 +263,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp)
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr)
{
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
@@ -271,7 +272,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
u8 len;
int err;
- skb = __can_get_echo_skb(dev, idx, &len);
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
new file mode 100644
index 000000000000..6a64fe410987
--- /dev/null
+++ b/drivers/net/can/dev/skb.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/can/dev.h>
+
+/* Local echo of CAN messages
+ *
+ * CAN network devices *should* support a local echo functionality
+ * (see Documentation/networking/can.rst). To test the handling of CAN
+ * interfaces that do not support the local echo, both driver types are
+ * implemented. If the driver does not support the echo, IFF_ECHO
+ * remains clear in dev->flags, which causes the PF_CAN core to perform
+ * the echo as a fallback solution.
+ */
+void can_flush_echo_skb(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ int i;
+
+ for (i = 0; i < priv->echo_skb_max; i++) {
+ if (priv->echo_skb[i]) {
+ kfree_skb(priv->echo_skb[i]);
+ priv->echo_skb[i] = NULL;
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+ }
+ }
+}
+
+/* Put the skb on the stack, to be looped back locally later on
+ *
+ * The function is typically called in the start_xmit function
+ * of the device driver. The driver must protect access to
+ * priv->echo_skb, if necessary.
+ */
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD))) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (!priv->echo_skb[idx]) {
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return -ENOMEM;
+
+ /* make settings for echo to reduce code in irq context */
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = dev;
+
+ /* save frame_len to reuse it when transmission is completed */
+ can_skb_prv(skb)->frame_len = frame_len;
+
+ skb_tx_timestamp(skb);
+
+ /* save this skb for tx interrupt echo handling */
+ priv->echo_skb[idx] = skb;
+ } else {
+ /* locking problem with netif_stop_queue() ?? */
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
+ kfree_skb(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_put_echo_skb);
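+
+/* Usage sketch (hypothetical xmit path): queue the skb for echo before
+ * kicking the hardware; frame_len feeds the BQL accounting and may be 0
+ * if the driver does not use BQL:
+ *
+ *   unsigned int frame_len = can_skb_get_frame_len(skb);
+ *
+ *   can_put_echo_skb(skb, dev, 0, frame_len);
+ *   netdev_sent_queue(dev, frame_len);
+ *   my_chip_kick_tx(dev);
+ */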
+
+struct sk_buff *
+__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
+ unsigned int *frame_len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
+ if (frame_len_ptr)
+ *frame_len_ptr = can_skb_priv->frame_len;
+
+ priv->echo_skb[idx] = NULL;
+
+ return skb;
+ }
+
+ return NULL;
+}
+
+/* Get the skb from the stack and loop it back locally
+ *
+ * The function is typically called when the TX done interrupt
+ * is handled in the device driver. The driver must protect
+ * access to priv->echo_skb, if necessary.
+ */
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct sk_buff *skb;
+ u8 len;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_get_echo_skb);
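+
+/* Usage sketch (hypothetical TX-done handler):
+ *
+ *   unsigned int frame_len;
+ *
+ *   stats->tx_bytes += can_get_echo_skb(dev, 0, &frame_len);
+ *   stats->tx_packets++;
+ *   netdev_completed_queue(dev, 1, frame_len);
+ */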
+
+/* Remove the skb from the stack and free it.
+ *
+ * The function is typically called when TX failed.
+ */
+void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ BUG_ON(idx >= priv->echo_skb_max);
+
+ if (priv->echo_skb[idx]) {
+ dev_kfree_skb_any(priv->echo_skb[idx]);
+ priv->echo_skb[idx] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(can_free_echo_skb);
+
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
+
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->len = CAN_ERR_DLC;
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 038fe1036df2..5d9157c655e9 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -9,6 +9,7 @@
//
// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -17,6 +18,7 @@
#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
@@ -242,6 +244,8 @@
#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
/* support memory detection and correction */
#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
+/* Setup stop mode with SCU firmware to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -347,6 +351,7 @@ struct flexcan_priv {
u8 mb_count;
u8 mb_size;
u8 clk_src; /* clock source of CAN Protocol Engine */
+ u8 scu_idx;
u64 rx_mask;
u64 tx_mask;
@@ -358,6 +363,9 @@ struct flexcan_priv {
struct regulator *reg_xceiver;
struct flexcan_stop_mode stm;
+ /* IPC handle for setting up stop mode via System Controller firmware (SCFW) */
+ struct imx_sc_ipc *sc_ipc_handle;
+
/* Read and Write APIs */
u32 (*read)(void __iomem *addr);
void (*write)(u32 val, void __iomem *addr);
@@ -387,7 +395,7 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPORT_FD,
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -546,18 +554,42 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
priv->write(reg_mcr, &regs->mcr);
}
+static int flexcan_stop_mode_enable_scfw(struct flexcan_priv *priv, bool enabled)
+{
+ u8 idx = priv->scu_idx;
+ u32 rsrc_id, val;
+
+ rsrc_id = IMX_SC_R_CAN(idx);
+
+ if (enabled)
+ val = 1;
+ else
+ val = 0;
+
+ /* stop mode request via scu firmware */
+ return imx_sc_misc_set_control(priv->sc_ipc_handle, rsrc_id,
+ IMX_SC_C_IPG_STOP, val);
+}
+
static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr;
+ int ret;
reg_mcr = priv->read(&regs->mcr);
reg_mcr |= FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
/* enable stop request */
- regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
- 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ ret = flexcan_stop_mode_enable_scfw(priv, true);
+ if (ret < 0)
+ return ret;
+ } else {
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ }
return flexcan_low_power_enter_ack(priv);
}
@@ -566,10 +598,17 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr;
+ int ret;
/* remove stop request */
- regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
- 1 << priv->stm.req_bit, 0);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ ret = flexcan_stop_mode_enable_scfw(priv, false);
+ if (ret < 0)
+ return ret;
+ } else {
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+ }
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
@@ -776,7 +815,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
priv->write(can_id, &priv->tx_mb->can_id);
priv->write(ctrl, &priv->tx_mb->can_ctrl);
@@ -1083,8 +1122,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
- stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
- 0, reg_ctrl << 16);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload, 0,
+ reg_ctrl << 16, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
@@ -1867,7 +1907,7 @@ static void unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
-static int flexcan_setup_stop_mode(struct platform_device *pdev)
+static int flexcan_setup_stop_mode_gpr(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
@@ -1912,11 +1952,6 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
"gpr %s req_gpr=0x02%x req_bit=%u\n",
gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit);
- device_set_wakeup_capable(&pdev->dev, true);
-
- if (of_property_read_bool(np, "wakeup-source"))
- device_set_wakeup_enable(&pdev->dev, true);
-
return 0;
out_put_node:
@@ -1924,6 +1959,58 @@ out_put_node:
return ret;
}
+static int flexcan_setup_stop_mode_scfw(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv;
+ u8 scu_idx;
+ int ret;
+
+ ret = of_property_read_u8(pdev->dev.of_node, "fsl,scu-index", &scu_idx);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "failed to get scu index\n");
+ return ret;
+ }
+
+ priv = netdev_priv(dev);
+ priv->scu_idx = scu_idx;
+
+	/* this function may defer probing, i.e. return -EPROBE_DEFER */
+ return imx_scu_get_handle(&priv->sc_ipc_handle);
+}
+
+/* flexcan_setup_stop_mode - Set up stop mode for wakeup
+ *
+ * Return: = 0 stop mode set up successfully, or the feature is not supported
+ *         < 0 failed to set up stop mode (may be a deferred probe)
+ */
+static int flexcan_setup_stop_mode(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv;
+ int ret;
+
+ priv = netdev_priv(dev);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
+ ret = flexcan_setup_stop_mode_scfw(pdev);
+ else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ ret = flexcan_setup_stop_mode_gpr(pdev);
+ else
+		/* return 0 directly if the stop mode feature is not supported */
+ return 0;
+
+ if (ret)
+ return ret;
+
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ if (of_property_read_bool(pdev->dev.of_node, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
+ return 0;
+}
+
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
@@ -2054,17 +2141,20 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ err = flexcan_setup_stop_mode(pdev);
+ if (err < 0) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "setup stop mode failed\n");
+ goto failed_setup_stop_mode;
+ }
+
of_can_transceiver(dev);
devm_can_led_init(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
- err = flexcan_setup_stop_mode(pdev);
- if (err)
- dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
- }
-
return 0;
+ failed_setup_stop_mode:
+ unregister_flexcandev(dev);
failed_register:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index f5d94a692576..4a8453290530 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -517,7 +517,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
stats->tx_packets++;
stats->tx_bytes += priv->txdlc[i];
priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i);
+ can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i);
@@ -1448,7 +1448,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* taken.
*/
priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
- can_put_echo_skb(skb, dev, slotindex);
+ can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
* read from the memory
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 86b0e1406a21..5bb957a26bc6 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -629,7 +629,7 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
/* TX IRQ */
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
- stats->tx_bytes += can_get_echo_skb(ndev, 0);
+ stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
}
@@ -922,7 +922,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT);
writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US);
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
/* Start the transmission */
writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 969cedb9b0b6..37e05010ca91 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -778,7 +778,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx);
+ can_put_echo_skb(skb, netdev, can->echo_idx, 0);
/* Move echo index to the next slot */
can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
@@ -1467,7 +1467,7 @@ static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
struct net_device_stats *stats = &can->can.dev->stats;
stats->tx_bytes += dlc;
@@ -1533,7 +1533,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
u8 count = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
index ef7963ff2006..d717bbc9e033 100644
--- a/drivers/net/can/m_can/Makefile
+++ b/drivers/net/can/m_can/Makefile
@@ -7,3 +7,7 @@ obj-$(CONFIG_CAN_M_CAN) += m_can.o
obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o
obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o
obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o
+
+tcan4x5x-objs :=
+tcan4x5x-objs += tcan4x5x-core.o
+tcan4x5x-objs += tcan4x5x-regmap.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index da551fd0f502..3752520a7d4b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -930,7 +930,7 @@ static void m_can_echo_tx_event(struct net_device *dev)
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
stats->tx_packets++;
}
}
@@ -972,7 +972,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
- stats->tx_bytes += can_get_echo_skb(dev, 0);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
@@ -1483,7 +1483,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -1554,7 +1554,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx);
+ can_put_echo_skb(skb, dev, putidx, 0);
/* Enable TX FIFO element to start transfer */
m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 970f0e9d19bf..b7caec769ddb 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -2,15 +2,8 @@
// SPI to CAN driver for the Texas Instruments TCAN4x5x
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
+#include "tcan4x5x.h"
-#include <linux/regulator/consumer.h>
-#include <linux/gpio/consumer.h>
-
-#include "m_can.h"
-
-#define DEVICE_NAME "tcan4x5x"
#define TCAN4X5X_EXT_CLK_DEF 40000000
#define TCAN4X5X_DEV_ID0 0x00
@@ -88,14 +81,10 @@
#define TCAN4X5X_MRAM_START 0x8000
#define TCAN4X5X_MCAN_OFFSET 0x1000
-#define TCAN4X5X_MAX_REGISTER 0x8fff
#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
#define TCAN4X5X_SET_ALL_INT 0xffffffff
-#define TCAN4X5X_WRITE_CMD (0x61 << 24)
-#define TCAN4X5X_READ_CMD (0x41 << 24)
-
#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6))
#define TCAN4X5X_MODE_SLEEP 0x00
#define TCAN4X5X_MODE_STANDBY BIT(6)
@@ -113,18 +102,6 @@
#define TCAN4X5X_WD_3_S_TIMER BIT(29)
#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))
-struct tcan4x5x_priv {
- struct m_can_classdev cdev;
-
- struct regmap *regmap;
- struct spi_device *spi;
-
- struct gpio_desc *reset_gpio;
- struct gpio_desc *device_wake_gpio;
- struct gpio_desc *device_state_gpio;
- struct regulator *power;
-};
-
static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
return container_of(cdev, struct tcan4x5x_priv, cdev);
@@ -167,72 +144,6 @@ static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
return ret;
}
-static int regmap_spi_gather_write(void *context, const void *reg,
- size_t reg_len, const void *val,
- size_t val_len)
-{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
- struct spi_message m;
- u32 addr;
- struct spi_transfer t[2] = {
- { .tx_buf = &addr, .len = reg_len, .cs_change = 0,},
- { .tx_buf = val, .len = val_len, },
- };
-
- addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 2;
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
-{
- u16 *reg = (u16 *)(data);
- const u32 *val = data + 4;
-
- return regmap_spi_gather_write(context, reg, 4, val, count - 4);
-}
-
-static int regmap_spi_async_write(void *context,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len,
- struct regmap_async *a)
-{
- return -ENOTSUPP;
-}
-
-static struct regmap_async *regmap_spi_async_alloc(void)
-{
- return NULL;
-}
-
-static int tcan4x5x_regmap_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
-
- u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2;
-
- return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size);
-}
-
-static struct regmap_bus tcan4x5x_bus = {
- .write = tcan4x5x_regmap_write,
- .gather_write = regmap_spi_gather_write,
- .async_write = regmap_spi_async_write,
- .async_alloc = regmap_spi_async_alloc,
- .read = tcan4x5x_regmap_read,
- .read_flag_mask = 0x00,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
{
struct tcan4x5x_priv *priv = cdev_to_priv(cdev);
@@ -387,13 +298,6 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
return 0;
}
-static const struct regmap_config tcan4x5x_regmap = {
- .reg_bits = 32,
- .val_bits = 32,
- .cache_type = REGCACHE_NONE,
- .max_register = TCAN4X5X_MAX_REGISTER,
-};
-
static struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
.read_reg = tcan4x5x_read_reg,
@@ -450,17 +354,14 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
/* Configure the SPI bus */
- spi->bits_per_word = 32;
+ spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret)
goto out_m_can_class_free_dev;
- priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
- &spi->dev, &tcan4x5x_regmap);
- if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
+ ret = tcan4x5x_regmap_init(priv);
+ if (ret)
goto out_m_can_class_free_dev;
- }
ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
@@ -502,23 +403,26 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
}
static const struct of_device_id tcan4x5x_of_match[] = {
- { .compatible = "ti,tcan4x5x", },
- { }
+ {
+ .compatible = "ti,tcan4x5x",
+ }, {
+ /* sentinel */
+ },
};
MODULE_DEVICE_TABLE(of, tcan4x5x_of_match);
static const struct spi_device_id tcan4x5x_id_table[] = {
{
- .name = "tcan4x5x",
- .driver_data = 0,
+ .name = "tcan4x5x",
+ }, {
+ /* sentinel */
},
- { }
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
- .name = DEVICE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
.pm = NULL,
},
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
new file mode 100644
index 000000000000..ca80dbaf7a3f
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+//
+// Copyright (c) 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2018-2019 Texas Instruments Incorporated
+// http://www.ti.com/
+
+#include "tcan4x5x.h"
+
+#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
+#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
+
+#define TCAN4X5X_MAX_REGISTER 0x8ffc
+
+static int tcan4x5x_regmap_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len,
+ },
+ };
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd.cmd) +
+ sizeof(buf_tx->cmd.addr));
+ tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+ memcpy(buf_tx->data, val, val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
+{
+ return tcan4x5x_regmap_gather_write(context, data, sizeof(__be32),
+ data + sizeof(__be32),
+ count - sizeof(__be32));
+}
+
+static int tcan4x5x_regmap_read(void *context,
+ const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ struct tcan4x5x_map_buf *buf_rx = &priv->map_buf_rx;
+ struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
+ struct spi_transfer xfer[2] = {
+ {
+ .tx_buf = buf_tx,
+ }
+ };
+ struct spi_message msg;
+ int err;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ memcpy(&buf_tx->cmd, reg_buf, sizeof(buf_tx->cmd.cmd) +
+ sizeof(buf_tx->cmd.addr));
+ tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
+
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = val_buf;
+ xfer[1].len = val_len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+ if (TCAN4X5X_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len);
+ }
+
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ if (!(spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX))
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static const struct regmap_range tcan4x5x_reg_table_yes_range[] = {
+ regmap_reg_range(0x0000, 0x002c), /* Device ID and SPI Registers */
+	regmap_reg_range(0x0800, 0x083c), /* Device configuration registers and Interrupt Flags */
+ regmap_reg_range(0x1000, 0x10fc), /* M_CAN */
+ regmap_reg_range(0x8000, 0x87fc), /* MRAM */
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table = {
+ .yes_ranges = tcan4x5x_reg_table_yes_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_yes_range),
+};
+
+static const struct regmap_config tcan4x5x_regmap = {
+ .reg_bits = 24,
+ .reg_stride = 4,
+ .pad_bits = 8,
+ .val_bits = 32,
+ .wr_table = &tcan4x5x_reg_table,
+ .rd_table = &tcan4x5x_reg_table,
+ .max_register = TCAN4X5X_MAX_REGISTER,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = (__force unsigned long)
+ cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_READ),
+ .write_flag_mask = (__force unsigned long)
+ cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus tcan4x5x_bus = {
+ .write = tcan4x5x_regmap_write,
+ .gather_write = tcan4x5x_regmap_gather_write,
+ .read = tcan4x5x_regmap_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ .max_raw_read = 256,
+ .max_raw_write = 256,
+};
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv)
+{
+ priv->regmap = devm_regmap_init(&priv->spi->dev, &tcan4x5x_bus,
+ priv->spi, &tcan4x5x_regmap);
+ return PTR_ERR_OR_ZERO(priv->regmap);
+}
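/* A hedged usage sketch: with the regmap created by tcan4x5x_regmap_init(),
 * a plain regmap_read() is expected to emit the 4-byte header
 * {0x41, addr_hi, addr_lo, nwords} on the wire: the read instruction comes
 * from read_flag_mask, the 16-bit address from the reg bytes, and the word
 * count from the pad byte filled in by tcan4x5x_spi_cmd_set_len().
 * TCAN4X5X_DEV_ID0 (0x00) is the register defined in tcan4x5x-core.c above;
 * example_read_id() is an illustrative name.
 */
static int example_read_id(struct tcan4x5x_priv *priv, u32 *id)
{
	return regmap_read(priv->regmap, TCAN4X5X_DEV_ID0, id);
}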
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
new file mode 100644
index 000000000000..c66da829b795
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
+ *
+ * Copyright (c) 2020 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _TCAN4X5X_H
+#define _TCAN4X5X_H
+
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include "m_can.h"
+
+#define TCAN4X5X_SANITIZE_SPI 1
+
+struct __packed tcan4x5x_buf_cmd {
+ u8 cmd;
+ __be16 addr;
+ u8 len;
+};
+
+struct tcan4x5x_map_buf {
+ struct tcan4x5x_buf_cmd cmd;
+ u8 data[256 * sizeof(u32)];
+} ____cacheline_aligned;
+
+struct tcan4x5x_priv {
+ struct m_can_classdev cdev;
+
+ struct regmap *regmap;
+ struct spi_device *spi;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *device_wake_gpio;
+ struct gpio_desc *device_state_gpio;
+ struct regulator *power;
+
+ struct tcan4x5x_map_buf map_buf_rx;
+ struct tcan4x5x_map_buf map_buf_tx;
+};
+
+static inline void
+tcan4x5x_spi_cmd_set_len(struct tcan4x5x_buf_cmd *cmd, u8 len)
+{
+	/* length in number of u32 words */
+ cmd->len = len >> 2;
+}
+
+int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv);
+
+#endif
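/* A minimal sketch of the command-header layout, assuming the struct above:
 * the len field counts 32-bit words, not bytes. Shown for a 16-byte (4-word)
 * write to the M_CAN window at 0x1000 (TCAN4X5X_MCAN_OFFSET in the core
 * file); 0x61 is the top byte of TCAN4X5X_SPI_INSTRUCTION_WRITE from
 * tcan4x5x-regmap.c. example_build_header() is a hypothetical name.
 */
static void example_build_header(struct tcan4x5x_buf_cmd *cmd)
{
	cmd->cmd = 0x61;			/* SPI write instruction */
	cmd->addr = cpu_to_be16(0x1000);	/* big-endian register address */
	tcan4x5x_spi_cmd_set_len(cmd, 16);	/* cmd->len = 16 >> 2 = 4 */
}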
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 5ed00a1558e1..fa32e418eb29 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -270,7 +270,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
- can_put_echo_skb(skb, dev, buf_id);
+ can_put_echo_skb(skb, dev, buf_id, 0);
/* Enable interrupt. */
priv->tx_active |= 1 << buf_id;
@@ -448,7 +448,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
out_8(&regs->cantbsel, mask);
stats->tx_bytes += in_8(&regs->tx.dlr);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id);
+ can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 4f9e7ec192aa..92a54a5fd4c5 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -711,7 +711,7 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
struct net_device_stats *stats = &(priv->ndev->stats);
u32 dlc;
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->ifregs[1].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
@@ -924,7 +924,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
&priv->regs->ifregs[1].data[i / 2]);
}
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
+ can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
/* Set the size of the data. Update if2_mcont */
iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index c5334b0c3038..00847cbaf7b6 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -266,7 +266,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client);
+ can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
@@ -716,7 +716,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&priv->echo_lock, flags);
/* prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, ndev, priv->echo_idx);
+ can_put_echo_skb(skb, ndev, priv->echo_idx, 0);
/* move echo index to the next slot */
priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index c803327f8f79..4870c4ea190a 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -386,7 +386,7 @@ static void rcar_can_tx_done(struct net_device *ndev)
stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
RCAR_CAN_FIFO_DEPTH];
priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
priv->tx_tail++;
netif_wake_queue(ndev);
}
@@ -617,7 +617,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
* the CPU-side pointer for the transmit FIFO to the next
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 2778ed5c61d1..d8d233e62990 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1044,7 +1044,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
stats->tx_packets++;
stats->tx_bytes += priv->tx_len[sent];
priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent);
+ can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
@@ -1390,7 +1390,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b6a7003c51d2..9e86488ba55f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -318,7 +318,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < cf->len; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
cmd_reg_val |= CMD_AT;
@@ -531,7 +531,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
stats->tx_bytes +=
priv->read_reg(priv, SJA1000_FI) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 40070c930202..c44f3411e561 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -104,7 +104,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
card->tx.last_bus = priv->index;
++card->tx.pending;
++priv->tx.pending;
- can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ can_put_echo_skb(skb, dev, priv->tx.echo_put, 0);
++priv->tx.echo_put;
if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
priv->tx.echo_put = 0;
@@ -284,7 +284,7 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get);
+ can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index f9455de94786..c3e020c90111 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -586,7 +586,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -725,7 +725,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 25859d16d06f..f69fb4238a65 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1002,7 +1002,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -1171,7 +1171,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index f07e8b737d31..00e9855c23d1 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1271,7 +1271,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
mcp251xfd_get_tef_tail(priv),
- hw_tef_obj->ts);
+ hw_tef_obj->ts, NULL);
stats->tx_packets++;
priv->tef->tail++;
@@ -2436,7 +2436,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
netif_stop_queue(ndev);
- can_put_echo_skb(skb, ndev, tx_head);
+ can_put_echo_skb(skb, ndev, tx_head, 0);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
if (err)
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 783b63218b7b..54aa7c25c4de 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -448,7 +448,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ);
@@ -655,7 +655,7 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
readl(priv->base +
SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a6850ff0b55b..73245d8836a9 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -513,7 +513,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
- can_put_echo_skb(skb, ndev, mbxno);
+ can_put_echo_skb(skb, ndev, mbxno, 0);
spin_lock_irqsave(&priv->mbx_lock, flags);
--priv->tx_head;
@@ -757,7 +757,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
stamp = hecc_read_stamp(priv, mbxno);
stats->tx_bytes +=
can_rx_offload_get_echo_skb(&priv->offload,
- mbxno, stamp);
+ mbxno, stamp, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 25eee4466364..18f40eb20360 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -518,7 +518,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -801,7 +801,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9eed75a4b678..562acbf454fd 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -357,7 +357,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!msg->msg.txdone.status) {
stats->tx_packets++;
stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index);
@@ -783,7 +783,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_jobs);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 0487095e1fd0..a00dc1904415 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -370,7 +370,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id);
+ can_get_echo_skb(netdev, hf->echo_id, NULL);
gs_free_tx_context(txc);
@@ -525,7 +525,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, idx);
+ can_put_echo_skb(skb, netdev, idx, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index e2d58846c40c..2b7efd296758 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -578,7 +578,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
context->priv = priv;
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 480bd2ecb296..dcee8dc828ec 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1151,7 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 98c016ef0607..59ba7c7beec0 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -594,7 +594,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index df54eb7d4b36..4232a7126c1b 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -237,7 +237,7 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_bytes += ctx->dlc;
can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx);
+ can_get_echo_skb(netdev, ctx->ndx, NULL);
}
if (urb->status)
@@ -355,7 +355,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0);
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 251835ea15aa..573b11559d73 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -309,7 +309,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
/* do wakeup tx queue in case of success only */
@@ -365,7 +365,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 7d92da8954fe..fa403c080871 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -672,7 +672,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* update statistics */
up->netdev->stats.tx_packets++;
up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index);
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index);
@@ -1137,7 +1137,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
/* put the skb on can loopback stack */
spin_lock_irqsave(&up->echo_skb_lock, flags);
- can_put_echo_skb(skb, up->netdev, echo_index);
+ can_put_echo_skb(skb, up->netdev, echo_index, 0);
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
/* transmit it */
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 44478304ff46..e8c42430a4fc 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -585,7 +585,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
can_led_event(netdev, CAN_LED_EVENT_TX);
@@ -664,7 +664,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_urbs);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3f54edee92eb..37fa19c62d73 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -592,9 +592,9 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
(priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
else
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
priv->tx_head++;
@@ -1292,7 +1292,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
while (frames_sent--) {
stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ priv->tx_max, NULL);
priv->tx_tail++;
stats->tx_packets++;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f6a0488589fc..3af373e90806 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -60,6 +60,8 @@ source "drivers/net/dsa/qca/Kconfig"
source "drivers/net/dsa/sja1105/Kconfig"
+source "drivers/net/dsa/xrs700x/Kconfig"
+
config NET_DSA_QCA8K
tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
depends on NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a84adb140a04..f3598c040994 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -24,3 +24,4 @@ obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
obj-y += sja1105/
+obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 95c7fa171e35..23fc7225c8d1 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1374,26 +1374,22 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_phylink_mac_link_up);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct b53_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
-int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
- if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
+ if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
return -EOPNOTSUPP;
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
@@ -1404,47 +1400,50 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
- if (vlan->vid_end >= dev->num_vlans)
+ if (vlan->vid >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, ds->vlan_filtering);
return 0;
}
-EXPORT_SYMBOL(b53_vlan_prepare);
-void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
- u16 vid;
+ int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &dev->vlans[vid];
+ err = b53_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
- b53_get_vlan_entry(dev, vid, vl);
+ vl = &dev->vlans[vlan->vid];
- if (vid == 0 && vid == b53_default_pvid(dev))
- untagged = true;
+ b53_get_vlan_entry(dev, vlan->vid, vl);
- vl->members |= BIT(port);
- if (untagged && !dsa_is_cpu_port(ds, port))
- vl->untag |= BIT(port);
- else
- vl->untag &= ~BIT(port);
+ if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+ untagged = true;
- b53_set_vlan_entry(dev, vid, vl);
- b53_fast_age_vlan(dev, vid);
- }
+ vl->members |= BIT(port);
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag |= BIT(port);
+ else
+ vl->untag &= ~BIT(port);
+
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid_end);
- b53_fast_age_vlan(dev, vid);
+ vlan->vid);
+ b53_fast_age_vlan(dev, vlan->vid);
}
+
+ return 0;
}
EXPORT_SYMBOL(b53_vlan_add);
@@ -1454,27 +1453,24 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct b53_vlan *vl;
- u16 vid;
u16 pvid;
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &dev->vlans[vid];
+ vl = &dev->vlans[vlan->vid];
- b53_get_vlan_entry(dev, vid, vl);
+ b53_get_vlan_entry(dev, vlan->vid, vl);
- vl->members &= ~BIT(port);
+ vl->members &= ~BIT(port);
- if (pvid == vid)
- pvid = b53_default_pvid(dev);
+ if (pvid == vlan->vid)
+ pvid = b53_default_pvid(dev);
- if (untagged && !dsa_is_cpu_port(ds, port))
- vl->untag &= ~(BIT(port));
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag &= ~(BIT(port));
- b53_set_vlan_entry(dev, vid, vl);
- b53_fast_age_vlan(dev, vid);
- }
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
b53_fast_age_vlan(dev, pvid);
@@ -1751,8 +1747,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
-int b53_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct b53_device *priv = ds->priv;
@@ -1762,19 +1758,7 @@ int b53_mdb_prepare(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return 0;
-}
-EXPORT_SYMBOL(b53_mdb_prepare);
-
-void b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct b53_device *priv = ds->priv;
- int ret;
-
- ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
- if (ret)
- dev_err(ds->dev, "failed to add MDB entry\n");
+ return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
}
EXPORT_SYMBOL(b53_mdb_add);
@@ -2207,7 +2191,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fast_age = b53_br_fast_age,
.port_egress_floods = b53_br_egress_floods,
.port_vlan_filtering = b53_vlan_filtering,
- .port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
@@ -2215,7 +2198,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
- .port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
.port_max_mtu = b53_get_max_mtu,
@@ -2459,6 +2441,20 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ /* Starfighter 2 */
+ {
+ .chip_id = BCM4908_DEVICE_ID,
+ .dev_name = "BCM4908",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 256,
+ .cpu_port = 8, /* TODO: ports 4, 5, 8 */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
{
.chip_id = BCM7445_DEVICE_ID,
.dev_name = "BCM7445",
@@ -2606,9 +2602,8 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
- ds->configure_vlan_while_not_filtering = true;
ds->untag_bridge_pvid = true;
- dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
+ dev->vlan_enabled = true;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 7c67409bb186..0d2cc0453bef 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -64,6 +64,7 @@ struct b53_io_ops {
#define B53_INVALID_LANE 0xff
enum {
+ BCM4908_DEVICE_ID = 0x4908,
BCM5325_DEVICE_ID = 0x25,
BCM5365_DEVICE_ID = 0x65,
BCM5389_DEVICE_ID = 0x89,
@@ -347,12 +348,9 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans);
-int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -361,10 +359,8 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
-int b53_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-void b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 1e9a0adda2d6..d53485c79d77 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -105,7 +105,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_IMP;
else
offset = CORE_STS_OVERRIDE_IMP2;
@@ -715,7 +716,8 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
u32 reg, offset;
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
@@ -742,7 +744,8 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM7445_DEVICE_ID)
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
@@ -1113,7 +1116,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
- .port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
@@ -1123,7 +1125,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
- .port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
};
@@ -1135,6 +1136,30 @@ struct bcm_sf2_of_data {
unsigned int num_cfp_rules;
};
+static const u16 bcm_sf2_4908_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_CROSSBAR] = 0xc8,
+ [REG_RGMII_0_CNTRL] = 0xe0,
+ [REG_RGMII_1_CNTRL] = 0xec,
+ [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
+ .type = BCM4908_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_4908_reg_offsets,
+ .num_cfp_rules = 0, /* FIXME */
+};
+
/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
[REG_SWITCH_CNTRL] = 0x00,
@@ -1183,6 +1208,9 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
};
static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm4908-switch",
+ .data = &bcm_sf2_4908_data
+ },
{ .compatible = "brcm,bcm7445-switch-v4.0",
.data = &bcm_sf2_7445_data
},
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d82cee5d9202..ed45d16250e1 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -885,18 +885,15 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
return -EINVAL;
vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
- vlan.vid_begin = vid;
- vlan.vid_end = vid;
+ vlan.vid = vid;
if (cpu_to_be32(fs->h_ext.data[1]) & 1)
vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
else
vlan.flags = 0;
- ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
+ ret = ds->ops->port_vlan_add(ds, port_num, &vlan);
if (ret)
return ret;
-
- ds->ops->port_vlan_add(ds, port_num, &vlan);
}
/*
@@ -942,8 +939,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
return -EINVAL;
if ((fs->flow_type & FLOW_EXT) &&
- !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
- ds->ops->port_vlan_del))
+ !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
return -EOPNOTSUPP;
if (fs->location != RX_CLS_LOC_ANY &&
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index d8a5e6269c0e..1d2d55c9f8aa 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -17,6 +17,7 @@ enum bcm_sf2_reg_offs {
REG_SWITCH_REVISION,
REG_PHY_REVISION,
REG_SPHY_CNTRL,
+ REG_CROSSBAR,
REG_RGMII_0_CNTRL,
REG_RGMII_1_CNTRL,
REG_RGMII_2_CNTRL,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e38906ae8f23..5f69216376fe 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -190,8 +190,7 @@ static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
__func__, port, vlan_filtering);
@@ -199,53 +198,36 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int
-dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct dsa_loop_priv *ps = ds->priv;
- struct mii_bus *bus = ps->bus;
-
- dev_dbg(ds->dev, "%s: port: %d, vlan: %d-%d",
- __func__, port, vlan->vid_begin, vlan->vid_end);
-
- /* Just do a sleeping operation to make lockdep checks effective */
- mdiobus_read(bus, ps->port_base + port, MII_BMSR);
-
- if (vlan->vid_end > ARRAY_SIZE(ps->vlans))
- return -ERANGE;
-
- return 0;
-}
-
-static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid;
+
+ if (vlan->vid > ARRAY_SIZE(ps->vlans))
+ return -ERANGE;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &ps->vlans[vid];
+ vl = &ps->vlans[vlan->vid];
- vl->members |= BIT(port);
- if (untagged)
- vl->untagged |= BIT(port);
- else
- vl->untagged &= ~BIT(port);
+ vl->members |= BIT(port);
+ if (untagged)
+ vl->untagged |= BIT(port);
+ else
+ vl->untagged &= ~BIT(port);
- dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
- __func__, port, vid, untagged ? "un" : "", pvid);
- }
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
if (pvid)
- ps->ports[port].pvid = vid;
+ ps->ports[port].pvid = vlan->vid;
+
+ return 0;
}
static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
@@ -253,26 +235,24 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct dsa_loop_priv *ps = ds->priv;
+ u16 pvid = ps->ports[port].pvid;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
- u16 vid, pvid = ps->ports[port].pvid;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- vl = &ps->vlans[vid];
+ vl = &ps->vlans[vlan->vid];
- vl->members &= ~BIT(port);
- if (untagged)
- vl->untagged &= ~BIT(port);
+ vl->members &= ~BIT(port);
+ if (untagged)
+ vl->untagged &= ~BIT(port);
- if (pvid == vid)
- pvid = 1;
+ if (pvid == vlan->vid)
+ pvid = 1;
- dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
- __func__, port, vid, untagged ? "un" : "", pvid);
- }
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
ps->ports[port].pvid = pvid;
return 0;
@@ -307,7 +287,6 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.port_bridge_leave = dsa_loop_port_bridge_leave,
.port_stp_state_set = dsa_loop_port_stp_state_set,
.port_vlan_filtering = dsa_loop_port_vlan_filtering,
- .port_vlan_prepare = dsa_loop_port_vlan_prepare,
.port_vlan_add = dsa_loop_port_vlan_add,
.port_vlan_del = dsa_loop_port_vlan_del,
.port_change_mtu = dsa_loop_port_change_mtu,
@@ -344,7 +323,6 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
- ds->configure_vlan_while_not_filtering = true;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 6420b76ea37c..9a1921e653e8 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -348,14 +348,12 @@ static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
*/
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
const u16 restricted_vid = hellcreek_private_vid(i);
- u16 vid;
if (!dsa_is_user_port(ds, i))
continue;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (vid == restricted_vid)
- return -EBUSY;
+ if (vlan->vid == restricted_vid)
+ return -EBUSY;
}
return 0;
@@ -440,34 +438,35 @@ static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
mutex_unlock(&hellcreek->reg_lock);
}
-static void hellcreek_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct hellcreek *hellcreek = ds->priv;
- u16 vid;
+ int err;
+
+ err = hellcreek_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
- dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n",
- vlan->vid_begin, vlan->vid_end, port,
- untagged ? "untagged" : "tagged",
+ dev_dbg(hellcreek->dev, "Add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged);
+ hellcreek_apply_vlan(hellcreek, port, vlan->vid, pvid, untagged);
+
+ return 0;
}
static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct hellcreek *hellcreek = ds->priv;
- u16 vid;
- dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n",
- vlan->vid_begin, vlan->vid_end, port);
+ dev_dbg(hellcreek->dev, "Remove VLAN %d on port %d\n", vlan->vid, port);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- hellcreek_unapply_vlan(hellcreek, port, vid);
+ hellcreek_unapply_vlan(hellcreek, port, vlan->vid);
return 0;
}
@@ -866,14 +865,10 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
}
static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
struct hellcreek *hellcreek = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
vlan_filtering ? "Enable" : "Disable", port);
@@ -1038,11 +1033,6 @@ static int hellcreek_setup(struct dsa_switch *ds)
/* Configure PCP <-> TC mapping */
hellcreek_setup_tc_identity_mapping(hellcreek);
- /* Allow VLAN configurations while not filtering which is the default
- * for new DSA drivers.
- */
- ds->configure_vlan_while_not_filtering = true;
-
/* The VLAN awareness is a global switch setting. Therefore, mixed vlan
* filtering setups are not supported.
*/
@@ -1158,7 +1148,6 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.port_vlan_add = hellcreek_vlan_add,
.port_vlan_del = hellcreek_vlan_del,
.port_vlan_filtering = hellcreek_vlan_filtering,
- .port_vlan_prepare = hellcreek_vlan_prepare,
.setup = hellcreek_setup,
};
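The hellcreek conversion above shows the pattern repeated throughout this series: the switchdev prepare/commit transaction is gone, so each .port_vlan_add callback now validates and programs the hardware in a single call and reports failure through its return value. A minimal sketch of the new callback contract, using hypothetical foo_* helpers rather than any real driver:

static int foo_port_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan)
{
	int err;

	/* Checks that used to run in the prepare phase now run
	 * inline, before any hardware state is touched.
	 */
	err = foo_vlan_prepare(ds, port, vlan);
	if (err)
		return err;

	/* One VID per call: vlan->vid replaces the old
	 * vid_begin..vid_end range.
	 */
	return foo_vlan_commit(ds, port, vlan->vid);
}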
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index aa1142d6a9f5..344374025426 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1232,14 +1232,19 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
+ int err;
+
+ err = lan9303_port_mdb_prepare(ds, port, mdb);
+ if (err)
+ return err;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
mdb->vid);
- lan9303_alr_add_port(chip, mdb->addr, port, false);
+ return lan9303_alr_add_port(chip, mdb->addr, port, false);
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
@@ -1274,7 +1279,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.port_fdb_add = lan9303_port_fdb_add,
.port_fdb_del = lan9303_port_fdb_del,
.port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_prepare = lan9303_port_mdb_prepare,
.port_mdb_add = lan9303_port_mdb_add,
.port_mdb_del = lan9303_port_mdb_del,
};
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 662e68a0e7e6..9fec97773a15 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -727,23 +727,14 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
}
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
struct gswip_priv *priv = ds->priv;
/* Do not allow changing the VLAN filtering options while in bridge */
- if (switchdev_trans_ph_prepare(trans)) {
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
-
- if (!bridge)
- return 0;
-
- if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
- return -EIO;
-
- return 0;
- }
+ if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
+ return -EIO;
if (vlan_filtering) {
/* Use port based VLAN tag */
@@ -781,15 +772,8 @@ static int gswip_setup(struct dsa_switch *ds)
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++) {
- struct switchdev_trans trans;
-
- /* Skip the prepare phase, this shouldn't return an error
- * during setup.
- */
- trans.ph_prepare = false;
-
gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false, &trans);
+ gswip_port_vlan_filtering(ds, i, false);
}
/* enable Switch */
@@ -843,6 +827,9 @@ static int gswip_setup(struct dsa_switch *ds)
}
gswip_port_enable(ds, cpu_port, NULL);
+
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1146,56 +1133,55 @@ static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
unsigned int max_ports = priv->hw_info->max_ports;
- u16 vid;
- int i;
int pos = max_ports;
+ int i, idx = -1;
/* We only support VLAN filtering on bridges */
if (!dsa_is_cpu_port(ds, port) && !bridge)
return -EOPNOTSUPP;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int idx = -1;
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
- /* Check if there is already a page for this VLAN */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- priv->vlans[i].vid == vid) {
- idx = i;
+ /* If this VLAN is not programmed yet, we have to reserve
+	 * one entry in the VLAN table. Make sure the next search
+	 * starts at the following position.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
break;
}
}
- /* If this VLAN is not programmed yet, we have to reserve
- * one entry in the VLAN table. Make sure we start at the
- * next position round.
- */
- if (idx == -1) {
- /* Look for a free slot */
- for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
- if (!priv->vlans[pos].bridge) {
- idx = pos;
- pos++;
- break;
- }
- }
-
- if (idx == -1)
- return -ENOSPC;
- }
+ if (idx == -1)
+ return -ENOSPC;
}
return 0;
}
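With the vid_begin..vid_end loop gone, gswip_port_vlan_prepare reduces to a two-pass scan of the VLAN table: first look for an existing page matching (bridge, vid), then fall back to reserving the first free slot. The same search, sketched as a standalone C function with a simplified table type for illustration:

#include <stddef.h>

struct vlan_page {
	const void *bridge;	/* owning bridge, NULL if free */
	unsigned short vid;
};

/* Return the table index for (bridge, vid), falling back to the
 * first free slot if the VLAN is not programmed yet; -1 (the
 * driver returns -ENOSPC) if the table is full.
 */
static int vlan_page_lookup(const struct vlan_page *tab, size_t n,
			    const void *bridge, unsigned short vid)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tab[i].bridge == bridge && tab[i].vid == vid)
			return (int)i;

	for (i = 0; i < n; i++)
		if (!tab[i].bridge)
			return (int)i;

	return -1;
}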
-static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
+ int err;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
@@ -1203,10 +1189,10 @@ static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
* this.
*/
if (dsa_is_cpu_port(ds, port))
- return;
+ return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
+ return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
+ untagged, pvid);
}
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
@@ -1215,8 +1201,6 @@ static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
- int err;
/* We have to receive all packets on the CPU port and should not
* do any VLAN filtering here. This is also called with bridge
@@ -1226,13 +1210,7 @@ static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
if (dsa_is_cpu_port(ds, port))
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
- if (err)
- return err;
- }
-
- return 0;
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
}
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
@@ -1611,7 +1589,6 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.port_bridge_leave = gswip_port_bridge_leave,
.port_fast_age = gswip_port_fast_age,
.port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_prepare = gswip_port_vlan_prepare,
.port_vlan_add = gswip_port_vlan_add,
.port_vlan_del = gswip_port_vlan_del,
.port_stp_state_set = gswip_port_stp_state_set,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c973db101b72..37a73421e2cc 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -783,55 +783,53 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct switchdev_trans *trans)
+ bool flag)
{
struct ksz_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
return 0;
}
-static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, vid, new_pvid = 0;
+ u16 data, new_pvid = 0;
u8 fid, member, valid;
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ksz8795_r_vlan_table(dev, vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8795_r_vlan_table(dev, vlan->vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
- /* First time to setup the VLAN entry. */
- if (!valid) {
- /* Need to find a way to map VID to FID. */
- fid = 1;
- valid = 1;
- }
- member |= BIT(port);
+	/* First time to set up the VLAN entry. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vid, data);
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vlan->vid, data);
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- new_pvid = vid;
- }
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vlan->vid;
if (new_pvid) {
+ u16 vid;
+
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
vid &= 0xfff;
vid |= new_pvid;
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
}
+
+ return 0;
}
static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
@@ -839,7 +837,7 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
- u16 data, vid, pvid, new_pvid = 0;
+ u16 data, pvid, new_pvid = 0;
u8 fid, member, valid;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
@@ -847,24 +845,22 @@ static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ksz8795_r_vlan_table(dev, vid, &data);
- ksz8795_from_vlan(data, &fid, &member, &valid);
+ ksz8795_r_vlan_table(dev, vlan->vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
- member &= ~BIT(port);
+ member &= ~BIT(port);
- /* Invalidate the entry if no more member. */
- if (!member) {
- fid = 0;
- valid = 0;
- }
+ /* Invalidate the entry if no more member. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
- if (pvid == vid)
- new_pvid = 1;
+ if (pvid == vlan->vid)
+ new_pvid = 1;
- ksz8795_to_vlan(fid, member, valid, &data);
- ksz8795_w_vlan_table(dev, vid, data);
- }
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vlan->vid, data);
if (new_pvid != pvid)
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
@@ -1098,6 +1094,8 @@ static int ksz8795_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
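Note the inversion happening across this series: configuring VLANs while not filtering has become the default DSA behaviour, so drivers that already supported it simply drop the explicit assignment, while gswip and the two ksz drivers, whose hardware cannot do this, now have to opt out in their setup path. The opt-out boils down to one line, sketched here with a hypothetical foo_setup:

static int foo_setup(struct dsa_switch *ds)
{
	/* This hardware cannot program VLAN entries while a port is
	 * still VLAN-unaware, so opt out of the new default.
	 */
	ds->configure_vlan_while_not_filtering = false;

	return 0;
}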
@@ -1116,11 +1114,9 @@ static const struct dsa_switch_ops ksz8795_switch_ops = {
.port_stp_state_set = ksz8795_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz8795_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
.port_vlan_add = ksz8795_port_vlan_add,
.port_vlan_del = ksz8795_port_vlan_del,
.port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_prepare = ksz_port_mdb_prepare,
.port_mdb_add = ksz_port_mdb_add,
.port_mdb_del = ksz_port_mdb_del,
.port_mirror_add = ksz8795_port_mirror_add,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 42e647b67abd..00e38c8e0d01 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -493,14 +493,10 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct switchdev_trans *trans)
+ bool flag)
{
struct ksz_device *dev = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -514,38 +510,40 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
- u16 vid;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return;
- }
-
- vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
- if (untagged)
- vlan_table[1] |= BIT(port);
- else
- vlan_table[1] &= ~BIT(port);
- vlan_table[1] &= ~(BIT(dev->cpu_port));
+ err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return err;
+ }
- vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+ vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
- if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return;
- }
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+ err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return err;
}
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
+
+ return 0;
}
static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
@@ -554,30 +552,27 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
- u16 vid;
u16 pvid;
ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
pvid = pvid & 0xFFF;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return -ETIMEDOUT;
- }
+ if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
- vlan_table[2] &= ~BIT(port);
+ vlan_table[2] &= ~BIT(port);
- if (pvid == vid)
- pvid = 1;
+ if (pvid == vlan->vid)
+ pvid = 1;
- if (untagged)
- vlan_table[1] &= ~BIT(port);
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
- if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return -ETIMEDOUT;
- }
+ if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
}
ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
@@ -784,14 +779,15 @@ exit:
return ret;
}
-static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
u32 data;
int index;
u32 mac_hi, mac_lo;
+ int err = 0;
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
@@ -806,7 +802,8 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- if (ksz9477_wait_alu_sta_ready(dev)) {
+ err = ksz9477_wait_alu_sta_ready(dev);
+ if (err) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
@@ -829,8 +826,10 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->num_statics) {
+ err = -ENOSPC;
goto exit;
+ }
/* add entry */
static_table[0] = ALU_V_STATIC_VALID;
@@ -852,6 +851,7 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
exit:
mutex_unlock(&dev->alu_mutex);
+ return err;
}
static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
@@ -1381,6 +1381,8 @@ static int ksz9477_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1399,13 +1401,11 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_stp_state_set = ksz9477_port_stp_state_set,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
.port_vlan_add = ksz9477_port_vlan_add,
.port_vlan_del = ksz9477_port_vlan_del,
.port_fdb_dump = ksz9477_port_fdb_dump,
.port_fdb_add = ksz9477_port_fdb_add,
.port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_prepare = ksz_port_mdb_prepare,
.port_mdb_add = ksz9477_port_mdb_add,
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index cf743133b0b9..4e0619c66573 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -213,15 +213,6 @@ void ksz_port_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- /* nothing needed */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
-
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data)
{
@@ -253,16 +244,8 @@ int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- /* nothing to do */
- return 0;
-}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
-
-void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -284,7 +267,7 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
/* no available entry */
if (index == dev->num_statics && !empty)
- return;
+ return -ENOSPC;
/* add entry */
if (index == dev->num_statics) {
@@ -301,6 +284,8 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
alu.fid = mdb->vid;
}
dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 720f22275c84..f212775372ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -161,14 +161,10 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
-int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a67cac15a724..d2196197d920 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1376,12 +1376,8 @@ mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
 * set up with vlan_filtering not being set. Otherwise, the
@@ -1397,15 +1393,6 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int
-mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- /* nothing needed */
-
- return 0;
-}
-
static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
@@ -1493,7 +1480,7 @@ mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
}
-static void
+static int
mt7530_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -1501,23 +1488,21 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mt7530_hw_vlan_entry new_entry;
struct mt7530_priv *priv = ds->priv;
- u16 vid;
mutex_lock(&priv->reg_mutex);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
- mt7530_hw_vlan_update(priv, vid, &new_entry,
- mt7530_hw_vlan_add);
- }
+ mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
if (pvid) {
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
- G0_PORT_VID(vlan->vid_end));
- priv->ports[port].pvid = vlan->vid_end;
+ G0_PORT_VID(vlan->vid));
+ priv->ports[port].pvid = vlan->vid;
}
mutex_unlock(&priv->reg_mutex);
+
+ return 0;
}
static int
@@ -1526,22 +1511,20 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
{
struct mt7530_hw_vlan_entry target_entry;
struct mt7530_priv *priv = ds->priv;
- u16 vid, pvid;
+ u16 pvid;
mutex_lock(&priv->reg_mutex);
pvid = priv->ports[port].pvid;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- mt7530_hw_vlan_entry_init(&target_entry, port, 0);
- mt7530_hw_vlan_update(priv, vid, &target_entry,
- mt7530_hw_vlan_del);
+ mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+ mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
+ mt7530_hw_vlan_del);
- /* PVID is being restored to the default whenever the PVID port
- * is being removed from the VLAN.
- */
- if (pvid == vid)
- pvid = G0_PORT_VID_DEF;
- }
+ /* PVID is being restored to the default whenever the PVID port
+ * is being removed from the VLAN.
+ */
+ if (pvid == vlan->vid)
+ pvid = G0_PORT_VID_DEF;
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
priv->ports[port].pvid = pvid;
@@ -1656,7 +1639,6 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
- ds->configure_vlan_while_not_filtering = true;
ds->mtu_enforcement_ingress = true;
if (priv->id == ID_MT7530) {
@@ -1895,7 +1877,6 @@ mt7531_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
- ds->configure_vlan_while_not_filtering = true;
ds->mtu_enforcement_ingress = true;
/* Flush the FDB table */
@@ -2618,7 +2599,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
.port_vlan_filtering = mt7530_port_vlan_filtering,
- .port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eafe6bedc692..2f976050a0d7 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1396,15 +1396,32 @@ static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
+ struct dsa_switch_tree *dst = chip->ds->dst;
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
return 0;
/* Skip the local source device, which uses in-chip port VLAN */
- if (dev != chip->ds->index)
+ if (dev != chip->ds->index) {
pvlan = mv88e6xxx_port_vlan(chip, dev, port);
+ ds = dsa_switch_find(dst->index, dev);
+ dp = ds ? dsa_to_port(ds, port) : NULL;
+ if (dp && dp->lag_dev) {
+ /* As the PVT is used to limit flooding of
+ * FORWARD frames, which use the LAG ID as the
+ * source port, we must translate dev/port to
+ * the special "LAG device" in the PVT, using
+ * the LAG ID as the port number.
+ */
+			dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
+ port = dsa_lag_id(dst, dp->lag_dev);
+ }
+ }
+
return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
}
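The translation above means a FORWARD frame sourced from a remote LAG member is matched in the cross-chip PVT under the trunk pseudo-device rather than under its physical (device, port) pair. Just the address selection, as a sketch with hypothetical names and a made-up example in the comment:

/* E.g. remote device 1, port 2, bonded into LAG id 3: the PVT
 * entry is written at (0x1f, 3) instead of (1, 2).
 */
static void pvt_address(int *dev, int *port, int lag_id)
{
	if (lag_id >= 0) {
		*dev = 0x1f;	/* MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK */
		*port = lag_id;
	}
}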
@@ -1529,72 +1546,69 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
- u16 vid_begin, u16 vid_end)
+ u16 vid)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
int i, err;
+ if (!vid)
+ return -EOPNOTSUPP;
+
/* DSA and CPU ports have to be members of multiple vlans */
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
return 0;
- if (!vid_begin)
- return -EOPNOTSUPP;
-
- vlan.vid = vid_begin - 1;
+ vlan.vid = vid - 1;
vlan.valid = false;
- do {
- err = mv88e6xxx_vtu_getnext(chip, &vlan);
- if (err)
- return err;
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ return err;
- if (!vlan.valid)
- break;
+ if (!vlan.valid)
+ return 0;
- if (vlan.vid > vid_end)
- break;
+ if (vlan.vid != vid)
+ return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
- continue;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
+ continue;
- if (!dsa_to_port(ds, i)->slave)
- continue;
+ if (!dsa_to_port(ds, i)->slave)
+ continue;
- if (vlan.member[i] ==
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
- continue;
+ if (vlan.member[i] ==
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
- if (dsa_to_port(ds, i)->bridge_dev ==
- dsa_to_port(ds, port)->bridge_dev)
- break; /* same bridge, check next VLAN */
+ if (dsa_to_port(ds, i)->bridge_dev ==
+ dsa_to_port(ds, port)->bridge_dev)
+ break; /* same bridge, check next VLAN */
- if (!dsa_to_port(ds, i)->bridge_dev)
- continue;
+ if (!dsa_to_port(ds, i)->bridge_dev)
+ continue;
- dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
- port, vlan.vid, i,
- netdev_name(dsa_to_port(ds, i)->bridge_dev));
- return -EOPNOTSUPP;
- }
- } while (vlan.vid < vid_end);
+ dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
+ port, vlan.vid, i,
+ netdev_name(dsa_to_port(ds, i)->bridge_dev));
+ return -EOPNOTSUPP;
+ }
return 0;
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans)
+ bool vlan_filtering)
{
struct mv88e6xxx_chip *chip = ds->priv;
u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return mv88e6xxx_max_vid(chip) ? 0 : -EOPNOTSUPP;
+ if (!mv88e6xxx_max_vid(chip))
+ return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
@@ -1617,13 +1631,9 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
* members, do not support it (yet) and fallback to software VLAN.
*/
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
- vlan->vid_end);
+ err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid);
mv88e6xxx_reg_unlock(chip);
- /* We don't need any dynamic resource from the kernel (yet),
- * so skip the prepare phase.
- */
return err;
}
@@ -1923,9 +1933,6 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- if (!vid)
- return -EOPNOTSUPP;
-
vlan.vid = vid - 1;
vlan.valid = false;
@@ -1970,18 +1977,19 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
bool warn;
u8 member;
- u16 vid;
+ int err;
- if (!mv88e6xxx_max_vid(chip))
- return;
+ err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED;
@@ -1997,16 +2005,25 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn))
- dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
- vid, untagged ? 'u' : 't');
-
- if (pvid && mv88e6xxx_port_set_pvid(chip, port, vlan->vid_end))
- dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
- vlan->vid_end);
+ err = mv88e6xxx_port_vlan_join(chip, port, vlan->vid, member, warn);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
+ vlan->vid, untagged ? 'u' : 't');
+ goto out;
+ }
+ if (pvid) {
+ err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to set PVID %d\n",
+ port, vlan->vid);
+ goto out;
+ }
+ }
+out:
mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
@@ -2055,8 +2072,8 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u16 pvid, vid;
int err = 0;
+ u16 pvid;
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
@@ -2067,16 +2084,14 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (err)
goto unlock;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = mv88e6xxx_port_vlan_leave(chip, port, vid);
+ err = mv88e6xxx_port_vlan_leave(chip, port, vlan->vid);
+ if (err)
+ goto unlock;
+
+ if (vlan->vid == pvid) {
+ err = mv88e6xxx_port_set_pvid(chip, port, 0);
if (err)
goto unlock;
-
- if (vid == pvid) {
- err = mv88e6xxx_port_set_pvid(chip, port, 0);
- if (err)
- goto unlock;
- }
}
unlock:
@@ -2860,7 +2875,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
- ds->configure_vlan_while_not_filtering = true;
mv88e6xxx_reg_lock(chip);
@@ -5249,27 +5263,18 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
-static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- /* We don't need any dynamic resource from the kernel (yet),
- * so skip the prepare phase.
- */
-
- return 0;
-}
-
-static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
- MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC))
- dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
- port);
+ err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
@@ -5375,6 +5380,275 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
return err;
}
+static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ int id, members = 0;
+
+ if (!mv88e6xxx_has_lag(chip))
+ return false;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > 8)
+ return false;
+
+ /* We could potentially relax this to include active
+ * backup in the future.
+ */
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ /* Ideally we would also validate that the hash type matches
+ * the hardware. Alas, this is always set to unknown on team
+ * interfaces.
+ */
+ return true;
+}
+
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ u16 map = 0;
+ int id;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ /* Build the map of all ports to distribute flows destined for
+ * this LAG. This can be either a local user port, or a DSA
+ * port if the LAG port is on a remote chip.
+ */
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
+
+ return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
+}
+
+static const u8 mv88e6xxx_lag_mask_table[8][8] = {
+ /* Row number corresponds to the number of active members in a
+ * LAG. Each column states which of the eight hash buckets are
+ * mapped to the port at that column index in the LAG.
+ *
+ * Example: In a LAG with three active ports, the second port
+ * ([2][1]) would be selected for traffic mapped to buckets
+ * 3,4,5 (0x38).
+ */
+ { 0xff, 0, 0, 0, 0, 0, 0, 0 },
+ { 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
+ { 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
+ { 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
+ { 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
+ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
+};
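The comment above states an invariant worth making explicit: within each row, the masks of the active columns are disjoint and together cover all eight hash buckets, so every flow maps to exactly one LAG member. A small self-contained check of the table, assuming it is copied verbatim:

#include <assert.h>
#include <stdio.h>

static const unsigned char lag_mask_table[8][8] = {
	{ 0xff, 0, 0, 0, 0, 0, 0, 0 },
	{ 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
	{ 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
	{ 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
	{ 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
	{ 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
	{ 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
	{ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
};

int main(void)
{
	int members, col;

	for (members = 1; members <= 8; members++) {
		unsigned int seen = 0;

		for (col = 0; col < members; col++) {
			unsigned int m = lag_mask_table[members - 1][col];

			assert(!(seen & m));	/* buckets are disjoint */
			seen |= m;
		}
		assert(seen == 0xff);		/* all buckets covered */
	}
	puts("each bucket maps to exactly one member in every row");
	return 0;
}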
+
+static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
+ int num_tx, int nth)
+{
+ u8 active = 0;
+ int i;
+
+ num_tx = num_tx <= 8 ? num_tx : 8;
+ if (nth < num_tx)
+ active = mv88e6xxx_lag_mask_table[num_tx - 1][nth];
+
+ for (i = 0; i < 8; i++) {
+ if (BIT(i) & active)
+ mask[i] |= BIT(port);
+ }
+}
+
+static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ unsigned int id, num_tx;
+ struct net_device *lag;
+ struct dsa_port *dp;
+ int i, err, nth;
+ u16 mask[8];
+ u16 ivec;
+
+ /* Assume no port is a member of any LAG. */
+ ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
+
+ /* Disable all masks for ports that _are_ members of a LAG. */
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (!dp->lag_dev || dp->ds != ds)
+ continue;
+
+ ivec &= ~BIT(dp->index);
+ }
+
+ for (i = 0; i < 8; i++)
+ mask[i] = ivec;
+
+ /* Enable the correct subset of masks for all LAG ports that
+ * are in the Tx set.
+ */
+ dsa_lags_foreach_id(id, ds->dst) {
+ lag = dsa_lag_dev(ds->dst, id);
+ if (!lag)
+ continue;
+
+ num_tx = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (dp->lag_tx_enabled)
+ num_tx++;
+ }
+
+ if (!num_tx)
+ continue;
+
+ nth = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (!dp->lag_tx_enabled)
+ continue;
+
+ if (dp->ds == ds)
+ mv88e6xxx_lag_set_port_mask(mask, dp->index,
+ num_tx, nth);
+
+ nth++;
+ }
+ }
+
+ for (i = 0; i < 8; i++) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
+ struct net_device *lag)
+{
+ int err;
+
+ err = mv88e6xxx_lag_sync_masks(ds);
+
+ if (!err)
+ err = mv88e6xxx_lag_sync_map(ds, lag);
+
+ return err;
+}
+
+static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err, id;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_set_trunk(chip, port, true, id);
+ if (err)
+ goto err_unlock;
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto err_clear_trunk;
+
+ mv88e6xxx_reg_unlock(chip);
+ return 0;
+
+err_clear_trunk:
+ mv88e6xxx_port_set_trunk(chip, port, false, 0);
+err_unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_trunk;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_trunk;
+}
+
+static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
+ int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_pvt;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_pvt;
+}
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
@@ -5408,13 +5682,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_prepare = mv88e6xxx_port_mdb_prepare,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
@@ -5429,6 +5701,12 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
.devlink_info_get = mv88e6xxx_devlink_info_get,
+ .port_lag_change = mv88e6xxx_port_lag_change,
+ .port_lag_join = mv88e6xxx_port_lag_join,
+ .port_lag_leave = mv88e6xxx_port_lag_leave,
+ .crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
+ .crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
+ .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -5448,6 +5726,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
+ /* Some chips support up to 32, but that requires enabling the
+ * 5-bit port mode, which we do not support. 640k^W16 ought to
+ * be enough for anyone.
+ */
+ ds->num_lag_ids = mv88e6xxx_has_lag(chip) ? 16 : 0;
+
dev_set_drvdata(dev, ds);
return dsa_register_switch(ds);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 3543055bcb51..788b3f585ef3 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -662,6 +662,11 @@ static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
return chip->info->pvt;
}
+static inline bool mv88e6xxx_has_lag(struct mv88e6xxx_chip *chip)
+{
+ return !!chip->info->global2_addr;
+}
+
static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
{
return chip->info->num_databases;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 75b227d0f73b..da8bac8813e1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -126,8 +126,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
/* Offset 0x07: Trunk Mask Table register */
-static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
- bool hash, u16 mask)
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask)
{
u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
@@ -140,8 +140,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
/* Offset 0x08: Trunk Mapping Table register */
-static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
- u16 map)
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map)
{
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 1f42ee656816..253a79582a1d 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -101,6 +101,7 @@
#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
+#define MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK	0x1f
/* Offset 0x0C: Cross-chip Port VLAN Data Register */
#define MV88E6XXX_G2_PVT_DATA 0x0c
@@ -345,6 +346,10 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask);
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map);
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
@@ -520,6 +525,18 @@ static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip,
+ int num, bool hash, u16 mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip,
+ int id, u16 map)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
int target, int port)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 77a5fd1798cd..4b46e10a2dde 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -851,6 +851,27 @@ int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
}
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK;
+
+ if (trunk)
+ val |= MV88E6XXX_PORT_CTL1_TRUNK_PORT |
+ (id << MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT);
+ else
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
+}
+
/* Offset 0x06: Port Based VLAN Map */
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 500e1d4896ff..a729bba050df 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -168,6 +168,9 @@
/* Offset 0x05: Port Control 1 */
#define MV88E6XXX_PORT_CTL1 0x05
#define MV88E6XXX_PORT_CTL1_MESSAGE_PORT 0x8000
+#define MV88E6XXX_PORT_CTL1_TRUNK_PORT 0x4000
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK 0x0f00
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT 8
#define MV88E6XXX_PORT_CTL1_FID_11_4_MASK 0x00ff
/* Offset 0x06: Port Based VLAN Map */
@@ -351,6 +354,8 @@ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
bool message_port);
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id);
int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
size_t size);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 7dc230677b78..767cbdccdb3e 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -65,19 +65,12 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
return ocelot_fdb_del(ocelot, port, addr, vid);
}
-/* This callback needs to be present */
-static int felix_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- return 0;
-}
-
-static void felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int felix_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_mdb_add(ocelot, port, mdb);
+ return ocelot_port_mdb_add(ocelot, port, mdb);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
@@ -116,8 +109,7 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
- u16 vid, flags = vlan->flags;
- int err;
+ u16 flags = vlan->flags;
/* Ocelot switches copy frames as-is to the CPU, so the flags:
* egress-untagged or not, pvid or not, make no difference. This
@@ -130,61 +122,40 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
if (port == ocelot->npi)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_prepare(ocelot, port, vid,
- flags & BRIDGE_VLAN_INFO_PVID,
- flags & BRIDGE_VLAN_INFO_UNTAGGED);
- if (err)
- return err;
- }
-
- return 0;
+ return ocelot_vlan_prepare(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
-static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans)
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_vlan_filtering(ocelot, port, enabled, trans);
+ return ocelot_port_vlan_filtering(ocelot, port, enabled);
}
-static void felix_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
u16 flags = vlan->flags;
- u16 vid;
int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_add(ocelot, port, vid,
- flags & BRIDGE_VLAN_INFO_PVID,
- flags & BRIDGE_VLAN_INFO_UNTAGGED);
- if (err) {
- dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
- vid, port, err);
- return;
- }
- }
+ err = felix_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
+ return ocelot_vlan_add(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
static int felix_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
- u16 vid;
- int err;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ocelot_vlan_del(ocelot, port, vid);
- if (err) {
- dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
- vid, port, err);
- return err;
- }
- }
- return 0;
+ return ocelot_vlan_del(ocelot, port, vlan->vid);
}
static int felix_port_enable(struct dsa_switch *ds, int port,
@@ -328,7 +299,7 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
ANA_PORT_QOS_CFG,
port);
- for (i = 0; i < FELIX_NUM_TC * 2; i++) {
+ for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
ocelot_rmw_ix(ocelot,
(ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
@@ -451,12 +422,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
- ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->devlink = felix->ds->devlink;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
@@ -618,6 +589,10 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, port);
}
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ return err;
+
/* Include the CPU port module in the forwarding mask for unknown
* unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
* excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
@@ -628,7 +603,7 @@ static int felix_setup(struct dsa_switch *ds)
ANA_PGID_PGID, PGID_UC);
ds->mtu_enforcement_ingress = true;
- ds->configure_vlan_while_not_filtering = true;
+ ds->assisted_learning_on_cpu_port = true;
return 0;
}
@@ -639,14 +614,15 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
int port;
- if (felix->info->mdio_bus_free)
- felix->info->mdio_bus_free(ocelot);
+ ocelot_devlink_sb_unregister(ocelot);
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++)
ocelot_deinit_port(ocelot, port);
- ocelot_deinit_timestamp(ocelot);
- /* stop workqueue thread */
- ocelot_deinit(ocelot);
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
@@ -780,46 +756,156 @@ static int felix_port_setup_tc(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
}
+static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int felix_sb_occ_snapshot(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int felix_sb_occ_max_clear(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_cur, p_max);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
- .get_tag_protocol = felix_get_tag_protocol,
- .setup = felix_setup,
- .teardown = felix_teardown,
- .set_ageing_time = felix_set_ageing_time,
- .get_strings = felix_get_strings,
- .get_ethtool_stats = felix_get_ethtool_stats,
- .get_sset_count = felix_get_sset_count,
- .get_ts_info = felix_get_ts_info,
- .phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
- .port_enable = felix_port_enable,
- .port_disable = felix_port_disable,
- .port_fdb_dump = felix_fdb_dump,
- .port_fdb_add = felix_fdb_add,
- .port_fdb_del = felix_fdb_del,
- .port_mdb_prepare = felix_mdb_prepare,
- .port_mdb_add = felix_mdb_add,
- .port_mdb_del = felix_mdb_del,
- .port_bridge_join = felix_bridge_join,
- .port_bridge_leave = felix_bridge_leave,
- .port_stp_state_set = felix_bridge_stp_state_set,
- .port_vlan_prepare = felix_vlan_prepare,
- .port_vlan_filtering = felix_vlan_filtering,
- .port_vlan_add = felix_vlan_add,
- .port_vlan_del = felix_vlan_del,
- .port_hwtstamp_get = felix_hwtstamp_get,
- .port_hwtstamp_set = felix_hwtstamp_set,
- .port_rxtstamp = felix_rxtstamp,
- .port_txtstamp = felix_txtstamp,
- .port_change_mtu = felix_change_mtu,
- .port_max_mtu = felix_get_max_mtu,
- .port_policer_add = felix_port_policer_add,
- .port_policer_del = felix_port_policer_del,
- .cls_flower_add = felix_cls_flower_add,
- .cls_flower_del = felix_cls_flower_del,
- .cls_flower_stats = felix_cls_flower_stats,
- .port_setup_tc = felix_port_setup_tc,
+ .get_tag_protocol = felix_get_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .port_mdb_add = felix_mdb_add,
+ .port_mdb_del = felix_mdb_del,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+ .port_hwtstamp_get = felix_hwtstamp_get,
+ .port_hwtstamp_set = felix_hwtstamp_set,
+ .port_rxtstamp = felix_rxtstamp,
+ .port_txtstamp = felix_txtstamp,
+ .port_change_mtu = felix_change_mtu,
+ .port_max_mtu = felix_get_max_mtu,
+ .port_policer_add = felix_port_policer_add,
+ .port_policer_del = felix_port_policer_del,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
+ .port_setup_tc = felix_port_setup_tc,
+ .devlink_sb_pool_get = felix_sb_pool_get,
+ .devlink_sb_pool_set = felix_sb_pool_set,
+ .devlink_sb_port_pool_get = felix_sb_port_pool_get,
+ .devlink_sb_port_pool_set = felix_sb_port_pool_set,
+ .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
+ .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
+ .devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
+ .devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
+ .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
+ .devlink_sb_occ_tc_port_bind_get = felix_sb_occ_tc_port_bind_get,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 4c717324ac2f..994835cb9307 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -5,7 +5,6 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
-#define FELIX_NUM_TC 8
/* Platform-specific information */
struct felix_info {
@@ -15,7 +14,6 @@ struct felix_info {
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
- int shared_queue_sz;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2e5bbdca5ea4..f9711e69b8d5 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1006,9 +1006,27 @@ static u16 vsc9959_wm_enc(u16 value)
return value;
}
+static u16 vsc9959_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(8, 0));
+
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
+static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
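
The wm_enc()/wm_dec() pair above implements a floating-point-like watermark encoding: values below 256 words are stored verbatim, anything larger sets BIT(8) and stores the value in 16-word units, trading precision for range. A stand-alone sketch of the round trip (the encoder body here is a plausible inverse inferred from wm_dec(), not copied from the driver):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)          (1u << (n))
#define GENMASK(h, l)   (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Mirrors vsc9959_wm_dec(): BIT(8) selects the "multiply by 16" range. */
static uint16_t wm_dec(uint16_t wm)
{
        if (wm & BIT(8))
                return (wm & GENMASK(7, 0)) * 16;
        return wm;
}

/* Hypothetical inverse, for illustration: round down to 16-word units. */
static uint16_t wm_enc(uint16_t value)
{
        if (value >= BIT(8))
                return BIT(8) | (value / 16);
        return value;
}

int main(void)
{
        /* Small values survive the round trip exactly... */
        assert(wm_dec(wm_enc(200)) == 200);
        /* ...large ones are quantized to multiples of 16. */
        printf("%u -> 0x%x -> %u\n", 1000, wm_enc(1000),
               wm_dec(wm_enc(1000)));       /* 1000 -> 0x13e -> 992 */
        return 0;
}

vsc9953_wm_dec() further down is the same scheme widened by one bit: BIT(9) as the range flag and an 8-bit mantissa below it.
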
@@ -1356,10 +1374,9 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
- .shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
.num_ports = 6,
- .num_tx_queues = FELIX_NUM_TC,
+ .num_tx_queues = OCELOT_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
.ptp_caps = &vsc9959_ptp_caps,
@@ -1408,17 +1425,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
goto err_pci_enable;
}
- /* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
- }
-
felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
if (!felix) {
err = -ENOMEM;
@@ -1429,7 +1435,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = FELIX_NUM_TC;
+ ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);
@@ -1474,9 +1480,8 @@ err_register_ds:
kfree(ds);
err_alloc_ds:
err_alloc_irq:
-err_alloc_felix:
kfree(felix);
-err_dma:
+err_alloc_felix:
pci_disable_device(pdev);
err_pci_enable:
return err;
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ebbaf6817ec8..5e9bfdea50be 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1057,9 +1057,27 @@ static u16 vsc9953_wm_enc(u16 value)
return value;
}
+static u16 vsc9953_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(9, 0));
+
+ if (wm & BIT(9))
+ return (wm & GENMASK(8, 0)) * 16;
+
+ return wm;
+}
+
+static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(25, 13)) >> 13;
+ *maxuse = val & GENMASK(12, 0);
+}
+
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
+ .wm_dec = vsc9953_wm_dec,
+ .wm_stat = vsc9953_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
@@ -1181,9 +1199,9 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
- .shared_queue_sz = 256 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
+ .num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 4d49c5f2b790..ca2ad77b71f1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -101,6 +101,9 @@
AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
AR9331_SW_PORT_STATUS_SPEED_M)
+/* MIB registers */
+#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))
+
/* Phy bypass mode
* ------------------------------------------------------------------------
* Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
@@ -154,6 +157,66 @@
#define AR9331_SW_MDIO_POLL_SLEEP_US 1
#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+/* The interval should be small enough to avoid overflow of the 32-bit
+ * MIB counters between two reads.
+ *
+ * FIXME: until we can read the MIBs directly from the .get_stats64
+ * call (i.e. sleep there), we have to poll the stats more frequently
+ * than is actually needed. For overflow protection alone, a 100 second
+ * interval would have been fine.
+ */
+#define STATS_INTERVAL_JIFFIES (3 * HZ)
+
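
The numbers in the FIXME are easy to verify: the fastest-moving 32-bit MIB is a byte counter, and at the AR9331's 100 Mbit/s line rate it wraps only after about 2^32 / 12.5e6, roughly 343 seconds, so a 100 s poll would indeed suffice for overflow protection; the 3 s interval merely keeps the cached stats fresh for readers. A quick stand-alone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Worst case: a 32-bit byte counter at 100 Mbit/s line rate. */
        const uint64_t line_rate_bps = 100000000ull;
        const uint64_t bytes_per_sec = line_rate_bps / 8;  /* 12.5 MB/s */
        uint64_t secs_to_wrap = (1ull << 32) / bytes_per_sec;

        printf("u32 byte counter wraps after ~%llu s\n",
               (unsigned long long)secs_to_wrap);          /* ~343 s */
        return 0;
}
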
+struct ar9331_sw_stats_raw {
+ u32 rxbroad; /* 0x00 */
+ u32 rxpause; /* 0x04 */
+ u32 rxmulti; /* 0x08 */
+ u32 rxfcserr; /* 0x0c */
+ u32 rxalignerr; /* 0x10 */
+ u32 rxrunt; /* 0x14 */
+ u32 rxfragment; /* 0x18 */
+ u32 rx64byte; /* 0x1c */
+ u32 rx128byte; /* 0x20 */
+ u32 rx256byte; /* 0x24 */
+ u32 rx512byte; /* 0x28 */
+ u32 rx1024byte; /* 0x2c */
+ u32 rx1518byte; /* 0x30 */
+ u32 rxmaxbyte; /* 0x34 */
+ u32 rxtoolong; /* 0x38 */
+ u32 rxgoodbyte; /* 0x3c */
+ u32 rxgoodbyte_hi;
+ u32 rxbadbyte; /* 0x44 */
+ u32 rxbadbyte_hi;
+ u32 rxoverflow; /* 0x4c */
+ u32 filtered; /* 0x50 */
+ u32 txbroad; /* 0x54 */
+ u32 txpause; /* 0x58 */
+ u32 txmulti; /* 0x5c */
+ u32 txunderrun; /* 0x60 */
+ u32 tx64byte; /* 0x64 */
+ u32 tx128byte; /* 0x68 */
+ u32 tx256byte; /* 0x6c */
+ u32 tx512byte; /* 0x70 */
+ u32 tx1024byte; /* 0x74 */
+ u32 tx1518byte; /* 0x78 */
+ u32 txmaxbyte; /* 0x7c */
+ u32 txoversize; /* 0x80 */
+ u32 txbyte; /* 0x84 */
+ u32 txbyte_hi;
+ u32 txcollision; /* 0x8c */
+ u32 txabortcol; /* 0x90 */
+ u32 txmulticol; /* 0x94 */
+ u32 txsinglecol; /* 0x98 */
+ u32 txexcdefer; /* 0x9c */
+ u32 txdefer; /* 0xa0 */
+ u32 txlatecol; /* 0xa4 */
+};
+
+struct ar9331_sw_port {
+ int idx;
+ struct delayed_work mib_read;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock; /* protects stats */
+};
+
struct ar9331_sw_priv {
struct device *dev;
struct dsa_switch ds;
@@ -165,8 +228,17 @@ struct ar9331_sw_priv {
struct mii_bus *sbus; /* mdio slave */
struct regmap *regmap;
struct reset_control *sw_reset;
+ struct ar9331_sw_port port[AR9331_SW_PORTS];
};
+/* Open-coded container_of(): step back from port[idx] to port[0], then
+ * subtract the offset of the port[] array within struct ar9331_sw_priv.
+ */
+static struct ar9331_sw_priv *ar9331_sw_port_to_priv(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_port *p = port - port->idx;
+
+ return (struct ar9331_sw_priv *)((void *)p -
+ offsetof(struct ar9331_sw_priv, port));
+}
+
/* Warning: switch reset will reset last AR9331_SW_MDIO_PHY_MODE_PAGE request
* If some kind of optimization is used, the request should be repeated.
*/
@@ -330,6 +402,8 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
if (ret)
goto error;
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
error:
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
@@ -424,6 +498,7 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
int ret;
@@ -431,6 +506,8 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
AR9331_SW_PORT_STATUS_MAC_MASK, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+
+ cancel_delayed_work_sync(&p->mib_read);
}
static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -441,10 +518,13 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
u32 val;
int ret;
+ schedule_delayed_work(&p->mib_read, 0);
+
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
case SPEED_1000:
@@ -477,6 +557,73 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
+static void ar9331_read_stats(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct rtnl_link_stats64 *stats = &port->stats;
+ struct ar9331_sw_stats_raw raw;
+ int ret;
+
+ /* Do the slowest part first, so the stats lock is only held briefly */
+ ret = regmap_bulk_read(priv->regmap, AR9331_MIB_COUNTER(port->idx),
+ &raw, sizeof(raw) / sizeof(u32));
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return;
+ }
+ /* All MIB counters are cleared automatically on read */
+
+ spin_lock(&port->stats_lock);
+
+ stats->rx_bytes += raw.rxgoodbyte;
+ stats->tx_bytes += raw.txbyte;
+
+ stats->rx_packets += raw.rx64byte + raw.rx128byte + raw.rx256byte +
+ raw.rx512byte + raw.rx1024byte + raw.rx1518byte + raw.rxmaxbyte;
+ stats->tx_packets += raw.tx64byte + raw.tx128byte + raw.tx256byte +
+ raw.tx512byte + raw.tx1024byte + raw.tx1518byte + raw.txmaxbyte;
+
+ stats->rx_length_errors += raw.rxrunt + raw.rxfragment + raw.rxtoolong;
+ stats->rx_crc_errors += raw.rxfcserr;
+ stats->rx_frame_errors += raw.rxalignerr;
+ stats->rx_missed_errors += raw.rxoverflow;
+ stats->rx_dropped += raw.filtered;
+ stats->rx_errors += raw.rxfcserr + raw.rxalignerr + raw.rxrunt +
+ raw.rxfragment + raw.rxoverflow + raw.rxtoolong;
+
+ stats->tx_window_errors += raw.txlatecol;
+ stats->tx_fifo_errors += raw.txunderrun;
+ stats->tx_aborted_errors += raw.txabortcol;
+ stats->tx_errors += raw.txoversize + raw.txabortcol + raw.txunderrun +
+ raw.txlatecol;
+
+ stats->multicast += raw.rxmulti;
+ stats->collisions += raw.txcollision;
+
+ spin_unlock(&port->stats_lock);
+}
+
+static void ar9331_do_stats_poll(struct work_struct *work)
+{
+ struct ar9331_sw_port *port = container_of(work, struct ar9331_sw_port,
+ mib_read.work);
+
+ ar9331_read_stats(port);
+
+ schedule_delayed_work(&port->mib_read, STATS_INTERVAL_JIFFIES);
+}
+
+static void ar9331_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
@@ -485,6 +632,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+ .get_stats64 = ar9331_get_stats64,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -796,7 +944,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv;
struct dsa_switch *ds;
- int ret;
+ int ret, i;
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -831,6 +979,14 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->ops = &priv->ops;
dev_set_drvdata(&mdiodev->dev, priv);
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ port->idx = i;
+ spin_lock_init(&port->stats_lock);
+ INIT_DELAYED_WORK(&port->mib_read, ar9331_do_stats_poll);
+ }
+
ret = dsa_register_switch(ds);
if (ret)
goto err_remove_irq;
@@ -846,6 +1002,13 @@ err_remove_irq:
static void ar9331_sw_remove(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ cancel_delayed_work_sync(&port->mib_read);
+ }
irq_domain_remove(priv->irqdomain);
mdiobus_unregister(priv->mbus);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 5bdac669a339..6127823d6c2e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1294,14 +1294,10 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct qca8k_priv *priv = ds->priv;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (vlan_filtering) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
@@ -1316,13 +1312,6 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
}
static int
-qca8k_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- return 0;
-}
-
-static void
qca8k_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -1330,24 +1319,24 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port,
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct qca8k_priv *priv = ds->priv;
int ret = 0;
- u16 vid;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
- ret = qca8k_vlan_add(priv, port, vid, untagged);
-
- if (ret)
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
if (pvid) {
int shift = 16 * (port % 2);
qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift,
- vlan->vid_end << shift);
+ 0xfff << shift, vlan->vid << shift);
qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid_end) |
- QCA8K_PORT_VLAN_SVID(vlan->vid_end));
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
}
+
+ return 0;
}
static int
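
The PVID update above exploits the fact that each 32-bit QCA8K_EGRESS_VLAN register holds the VIDs of two adjacent ports, one per 16-bit half; 16 * (port % 2) picks the half-word and 0xfff masks the 12-bit VID field. A stand-alone sketch of that read-modify-write (register layout as in the code above; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Two ports per 32-bit egress-VLAN register: even port in the low
 * 16-bit half, odd port in the high half, 12-bit VID in each.
 */
static void egress_vlan_rmw(uint32_t *reg, int port, uint16_t vid)
{
        int shift = 16 * (port % 2);
        uint32_t mask = 0xfffu << shift;

        *reg = (*reg & ~mask) | ((uint32_t)vid << shift);
}

int main(void)
{
        uint32_t reg = 0;

        egress_vlan_rmw(&reg, 2, 100);  /* even port -> low half  */
        egress_vlan_rmw(&reg, 3, 200);  /* odd port  -> high half */
        printf("reg = 0x%08x\n", reg);  /* 0x00c80064 */
        return 0;
}
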
@@ -1356,11 +1345,8 @@ qca8k_port_vlan_del(struct dsa_switch *ds, int port,
{
struct qca8k_priv *priv = ds->priv;
int ret = 0;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid)
- ret = qca8k_vlan_del(priv, port, vid);
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret)
dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
@@ -1393,7 +1379,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
.port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_prepare = qca8k_port_vlan_prepare,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_validate = qca8k_phylink_validate,
@@ -1446,7 +1431,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->configure_vlan_while_not_filtering = true;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index 6b6a3dec0984..26376b052594 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -131,12 +131,9 @@ int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
int rtl8366_init_vlan(struct realtek_smi *smi);
int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct switchdev_trans *trans);
-int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-void rtl8366_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
+ bool vlan_filtering);
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 83d481ef9273..3b24f2e13200 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -340,20 +340,15 @@ int rtl8366_init_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct switchdev_trans *trans)
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct realtek_smi *smi = ds->priv;
struct rtl8366_vlan_4k vlan4k;
int ret;
/* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (switchdev_trans_ph_prepare(trans)) {
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- return 0;
- }
+ if (!smi->ops->is_vlan_valid(smi, port + 1))
+ return -EINVAL;
dev_info(smi->dev, "%s filtering on port %d\n",
vlan_filtering ? "enable" : "disable",
@@ -379,76 +374,56 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct realtek_smi *smi = ds->priv;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
- if (!smi->ops->is_vlan_valid(smi, vid))
- return -EINVAL;
-
- dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
- vlan->vid_begin, vlan->vid_end);
-
- /* Enable VLAN in the hardware
- * FIXME: what's with this 4k business?
- * Just rtl8366_enable_vlan() seems inconclusive.
- */
- return rtl8366_enable_vlan4k(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
-
-void rtl8366_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
struct realtek_smi *smi = ds->priv;
u32 member = 0;
u32 untag = 0;
- u16 vid;
int ret;
- for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
- if (!smi->ops->is_vlan_valid(smi, vid))
- return;
+ if (!smi->ops->is_vlan_valid(smi, vlan->vid))
+ return -EINVAL;
+
+ /* Enable VLAN in the hardware
+ * FIXME: what's with this 4k business?
+ * Just rtl8366_enable_vlan() seems inconclusive.
+ */
+ ret = rtl8366_enable_vlan4k(smi, true);
+ if (ret)
+ return ret;
dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid_begin,
- port,
- untagged ? "untagged" : "tagged",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
dev_err(smi->dev, "port is DSA or CPU port\n");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- member |= BIT(port);
-
- if (untagged)
- untag |= BIT(port);
+ member |= BIT(port);
- ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
- if (ret)
- dev_err(smi->dev,
- "failed to set up VLAN %04x",
- vid);
+ if (untagged)
+ untag |= BIT(port);
- if (!pvid)
- continue;
+ ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ if (ret) {
+ dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ return ret;
+ }
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret)
- dev_err(smi->dev,
- "failed to set PVID on port %d to VLAN %04x",
- port, vid);
+ if (!pvid)
+ return 0;
- if (!ret)
- dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
- vid, port);
+ ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ if (ret) {
+ dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ port, vlan->vid);
+ return ret;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
@@ -456,46 +431,39 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct realtek_smi *smi = ds->priv;
- u16 vid;
- int ret;
-
- dev_info(smi->dev, "del VLAN on port %d\n", port);
+ int ret, i;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int i;
+ dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
- dev_info(smi->dev, "del VLAN %04x\n", vid);
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ if (vlan->vid == vlanmc.vid) {
+ /* Remove this port from the VLAN */
+ vlanmc.member &= ~BIT(port);
+ vlanmc.untag &= ~BIT(port);
+ /* If no ports are members of this VLAN
+ * anymore then clear the whole member
+ * config so it can be reused.
+ */
+ if (!vlanmc.member && !vlanmc.untag) {
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.fid = 0;
+ }
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to remove VLAN %04x\n",
+ vlan->vid);
return ret;
-
- if (vid == vlanmc.vid) {
- /* Remove this port from the VLAN */
- vlanmc.member &= ~BIT(port);
- vlanmc.untag &= ~BIT(port);
- /*
- * If no ports are members of this VLAN
- * anymore then clear the whole member
- * config so it can be reused.
- */
- if (!vlanmc.member && vlanmc.untag) {
- vlanmc.vid = 0;
- vlanmc.priority = 0;
- vlanmc.fid = 0;
- }
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret) {
- dev_err(smi->dev,
- "failed to remove VLAN %04x\n",
- vid);
- return ret;
- }
- break;
}
+ break;
}
}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index cfe56960f44b..c6cc4938897c 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -972,6 +972,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ ds->configure_vlan_while_not_filtering = false;
+
return 0;
}
@@ -1504,7 +1506,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
.port_vlan_filtering = rtl8366_vlan_filtering,
- .port_vlan_prepare = rtl8366_vlan_prepare,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 4ebc4a5a7b35..d582308c2401 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -245,8 +245,7 @@ enum sja1105_reset_reason {
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
-int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans);
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_devlink.c */
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 4a2ec395bcb0..b4bf1b10e66c 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -135,7 +135,6 @@ static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
rtnl_lock();
for (port = 0; port < ds->num_ports; port++) {
- struct switchdev_trans trans;
struct dsa_port *dp;
if (!dsa_is_user_port(ds, port))
@@ -144,13 +143,7 @@ static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
dp = dsa_to_port(ds, port);
vlan_filtering = dsa_port_is_vlan_filtering(dp);
- trans.ph_prepare = true;
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
- if (rc)
- break;
-
- trans.ph_prepare = false;
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
if (rc)
break;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 4ca029650993..282253543f3b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -317,7 +317,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ table->entries = kzalloc(table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
@@ -1524,17 +1524,10 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
-/* This callback needs to be present */
-static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
-{
- return 0;
-}
-
-static void sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
- sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
@@ -2607,35 +2600,11 @@ out:
return rc;
}
-static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct sja1105_private *priv = ds->priv;
- u16 vid;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- return 0;
-
- /* If the user wants best-effort VLAN filtering (aka vlan_filtering
- * bridge plus tagging), be sure to at least deny alterations to the
- * configuration done by dsa_8021q.
- */
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (vid_is_dsa_8021q(vid)) {
- dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
-int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
- struct switchdev_trans *trans)
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
@@ -2647,16 +2616,12 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
u16 tpid, tpid2;
int rc;
- if (switchdev_trans_ph_prepare(trans)) {
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type == SJA1105_RULE_VL) {
- dev_err(ds->dev,
- "Cannot change VLAN filtering with active VL rules\n");
- return -EBUSY;
- }
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering with active VL rules\n");
+ return -EBUSY;
}
-
- return 0;
}
if (enabled) {
@@ -2794,29 +2759,34 @@ static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
return 0;
}
-static void sja1105_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int sja1105_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
- u16 vid;
int rc;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
- &priv->bridge_vlans);
- if (rc < 0)
- return;
- if (rc > 0)
- vlan_table_changed = true;
+ /* If the user wants best-effort VLAN filtering (aka vlan_filtering
+ * bridge plus tagging), be sure to at least deny alterations to the
+ * configuration done by dsa_8021q.
+ */
+ if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
+ vid_is_dsa_8021q(vlan->vid)) {
+ dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
+ return -EBUSY;
}
+ rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
+ &priv->bridge_vlans);
+ if (rc < 0)
+ return rc;
+ if (rc > 0)
+ vlan_table_changed = true;
+
if (!vlan_table_changed)
- return;
+ return 0;
- rc = sja1105_build_vlan_table(priv, true);
- if (rc)
- dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
+ return sja1105_build_vlan_table(priv, true);
}
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
@@ -2824,14 +2794,11 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
- u16 vid;
int rc;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
- if (rc > 0)
- vlan_table_changed = true;
- }
+ rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
+ if (rc > 0)
+ vlan_table_changed = true;
if (!vlan_table_changed)
return 0;
@@ -2934,8 +2901,6 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
- ds->configure_vlan_while_not_filtering = true;
-
rc = sja1105_devlink_setup(ds);
if (rc < 0)
return rc;
@@ -3298,11 +3263,9 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
.port_stp_state_set = sja1105_bridge_stp_state_set,
- .port_vlan_prepare = sja1105_vlan_prepare,
.port_vlan_filtering = sja1105_vlan_filtering,
.port_vlan_add = sja1105_vlan_add,
.port_vlan_del = sja1105_vlan_del,
- .port_mdb_prepare = sja1105_mdb_prepare,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_hwtstamp_get = sja1105_hwtstamp_get,
diff --git a/drivers/net/dsa/xrs700x/Kconfig b/drivers/net/dsa/xrs700x/Kconfig
new file mode 100644
index 000000000000..d10a4dce1676
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_XRS700X
+ tristate
+ depends on NET_DSA
+ select NET_DSA_TAG_XRS700X
+ select REGMAP
+ help
+ This enables support for Arrow SpeedChips XRS7003/7004 gigabit
+ Ethernet switches.
+
+config NET_DSA_XRS700X_I2C
+ tristate "Arrow XRS7000X series switch in I2C mode"
+ depends on NET_DSA && I2C
+ select NET_DSA_XRS700X
+ select REGMAP_I2C
+ help
+ Enable I2C support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
+
+config NET_DSA_XRS700X_MDIO
+ tristate "Arrow XRS7000X series switch in MDIO mode"
+ depends on NET_DSA
+ select NET_DSA_XRS700X
+ help
+ Enable MDIO support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
diff --git a/drivers/net/dsa/xrs700x/Makefile b/drivers/net/dsa/xrs700x/Makefile
new file mode 100644
index 000000000000..51a3a7d9296a
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_XRS700X) += xrs700x.o
+obj-$(CONFIG_NET_DSA_XRS700X_I2C) += xrs700x_i2c.o
+obj-$(CONFIG_NET_DSA_XRS700X_MDIO) += xrs700x_mdio.o
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
new file mode 100644
index 000000000000..259f5e657c46
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+#include <linux/of_device.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
+
+#define XRS7003E_ID 0x100
+#define XRS7003F_ID 0x101
+#define XRS7004E_ID 0x200
+#define XRS7004F_ID 0x201
+
+const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
+EXPORT_SYMBOL(xrs7003e_info);
+
+const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
+EXPORT_SYMBOL(xrs7003f_info);
+
+const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
+EXPORT_SYMBOL(xrs7004e_info);
+
+const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
+EXPORT_SYMBOL(xrs7004f_info);
+
+struct xrs700x_regfield {
+ struct reg_field rf;
+ struct regmap_field **rmf;
+};
+
+struct xrs700x_mib {
+ unsigned int offset;
+ const char *name;
+ int stats64_offset;
+};
+
+#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
+#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}
+
+static const struct xrs700x_mib xrs700x_mibs[] = {
+ XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
+ XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
+ XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
+ XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
+ XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
+ XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
+ XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
+ XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
+ XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
+ XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
+ XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
+ XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
+ XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
+ XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
+};
+
+static void xrs700x_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(xrs700x_mibs);
+}
+
+static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+ struct rtnl_link_stats64 stats;
+ int i;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&p->mib_mutex);
+
+ /* Capture counter values */
+ regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ unsigned int high = 0, low = 0, reg;
+
+ reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
+ regmap_read(priv->regmap, reg, &low);
+ regmap_read(priv->regmap, reg + 2, &high);
+
+ p->mib_data[i] += (high << 16) | low;
+
+ if (xrs700x_mibs[i].stats64_offset >= 0) {
+ u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
+ *(u64 *)s += p->mib_data[i];
+ }
+ }
+
+ /* multicast must be added to rx_packets (which already includes
+ * unicast and broadcast)
+ */
+ stats.rx_packets += stats.multicast;
+
+ u64_stats_update_begin(&p->syncp);
+ p->stats64 = stats;
+ u64_stats_update_end(&p->syncp);
+
+ mutex_unlock(&p->mib_mutex);
+}
+
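
Two idioms in xrs700x_read_port_counters() deserve a note: each counter is a 16-bit low/high register pair combined into a 32-bit sample and accumulated into a u64, and the stats64_offset field lets one loop scatter selected counters into struct rtnl_link_stats64 by plain offsetof() arithmetic instead of a per-counter switch. A condensed stand-alone sketch of the scatter technique (struct and values are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats64 { uint64_t rx_bytes; uint64_t rx_packets; };

struct mib { int stats64_offset; };     /* -1 = ethtool-only counter */

int main(void)
{
        struct stats64 stats = {0};
        struct mib mibs[] = {
                { offsetof(struct stats64, rx_bytes) },
                { -1 },                                 /* not in stats64 */
                { offsetof(struct stats64, rx_packets) },
        };
        uint64_t samples[] = { 1500, 7, 3 };

        for (size_t i = 0; i < sizeof(mibs) / sizeof(mibs[0]); i++) {
                if (mibs[i].stats64_offset < 0)
                        continue;
                /* Same pointer math as xrs700x_read_port_counters() */
                uint8_t *s = (uint8_t *)&stats + mibs[i].stats64_offset;
                *(uint64_t *)s += samples[i];
        }
        printf("rx_bytes=%llu rx_packets=%llu\n",
               (unsigned long long)stats.rx_bytes,      /* 1500 */
               (unsigned long long)stats.rx_packets);   /* 3 */
        return 0;
}
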
+static void xrs700x_mib_work(struct work_struct *work)
+{
+ struct xrs700x *priv = container_of(work, struct xrs700x,
+ mib_work.work);
+ int i;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ xrs700x_read_port_counters(priv, i);
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+}
+
+static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
+ u64 *data)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+
+ xrs700x_read_port_counters(priv, port);
+
+ mutex_lock(&p->mib_mutex);
+ memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
+ mutex_unlock(&p->mib_mutex);
+}
+
+static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ *s = p->stats64;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+}
+
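
The reader above is the stock u64_stats seqcount pattern: snapshot the struct, then retry if the sequence count shows a writer was active or ran in between, which keeps 64-bit stats tear-free on 32-bit machines without touching the MIB mutex (on 64-bit kernels the primitives compile down to a plain copy). A single-threaded user-space model of the idea, with memory ordering deliberately simplified; it is not a drop-in replacement for the kernel primitives:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct snap { uint64_t bytes; uint64_t packets; };

static atomic_uint seq;
static struct snap shared;

/* Writer, as in u64_stats_update_begin()/_end(): make the count odd
 * while the update is in flight, even again once it is complete.
 */
static void write_snap(uint64_t bytes, uint64_t packets)
{
        atomic_fetch_add(&seq, 1);
        shared.bytes = bytes;
        shared.packets = packets;
        atomic_fetch_add(&seq, 1);
}

/* Reader, as in u64_stats_fetch_begin()/_retry(): copy, then retry if
 * a writer was active (odd count) or ran in between (count changed).
 */
static struct snap read_snap(void)
{
        struct snap s;
        unsigned int start;

        do {
                start = atomic_load(&seq);
                s = shared;
        } while ((start & 1) || start != atomic_load(&seq));

        return s;
}

int main(void)
{
        write_snap(1500, 1);
        printf("bytes=%llu\n", (unsigned long long)read_snap().bytes);
        return 0;
}
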
+static int xrs700x_setup_regmap_range(struct xrs700x *priv)
+{
+ struct xrs700x_regfield regfields[] = {
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_forward
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_management
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_sel_speed
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_cur_speed
+ }
+ };
+ int i = 0;
+
+ for (; i < ARRAY_SIZE(regfields); i++) {
+ *regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
+ priv->regmap,
+ regfields[i].rf);
+ if (IS_ERR(*regfields[i].rmf))
+ return PTR_ERR(*regfields[i].rmf);
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_XRS700X;
+}
+
+static int xrs700x_reset(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
+ val, !(val & XRS_GENERAL_RESET),
+ 10, 1000);
+error:
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int bpdus = 1;
+ unsigned int val;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ bpdus = 0;
+ fallthrough;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = XRS_PORT_DISABLED;
+ break;
+ case BR_STATE_LEARNING:
+ val = XRS_PORT_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = XRS_PORT_FORWARDING;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_fields_write(priv->ps_forward, port, val);
+
+ /* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
+ * which allows BPDU forwarding to the CPU port when the front facing
+ * port is in disabled/learning state.
+ */
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);
+
+ dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
+ __func__, port, state, val);
+}
+
+/* Add an inbound policy filter which matches the BPDU destination MAC
+ * and forwards to the CPU port. Leave the policy disabled, it will be
+ * enabled as needed.
+ */
+static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare all 48 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
+ if (ret)
+ return ret;
+
+ /* match BPDU destination 01:80:c2:00:00:00 */
+ for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
+ eth_stp_addr[i] |
+ (eth_stp_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
+ /* Mirror BPDU to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
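
The loop above packs the BPDU MAC two bytes per 16-bit register, first byte in the low half, so 01:80:c2:00:00:00 lands as 0x8001, 0x00c2, 0x0000 in the three address words; the earlier 48 << 2 sets the compare length to all 48 address bits, shifted past the low control bits of XRS_ETH_ADDR_CFG (bit 0 is the enable that xrs700x_port_stp_state_set() toggles). A tiny stand-alone check of the packing:

#include <stdint.h>
#include <stdio.h>

static const uint8_t eth_stp_addr[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

int main(void)
{
        /* Same packing as xrs700x_port_add_bpdu_ipf(): two bytes per
         * 16-bit register, first byte in the low half.
         */
        for (int i = 0; i < 6; i += 2)
                printf("reg[%d] = 0x%04x\n", i / 2,
                       eth_stp_addr[i] | (eth_stp_addr[i + 1] << 8));
        /* -> 0x8001, 0x00c2, 0x0000 */
        return 0;
}
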
+static int xrs700x_port_setup(struct dsa_switch *ds, int port)
+{
+ bool cpu_port = dsa_is_cpu_port(ds, port);
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int ret, i;
+
+ xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);
+
+ /* Disable forwarding to non-CPU ports */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+ if (ret)
+ return ret;
+
+ val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
+ ret = regmap_fields_write(priv->ps_management, port, val);
+ if (ret)
+ return ret;
+
+ if (!cpu_port) {
+ ret = xrs700x_port_add_bpdu_ipf(ds, port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_setup(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ int ret, i;
+
+ ret = xrs700x_reset(ds);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ ret = xrs700x_port_setup(ds, i);
+ if (ret)
+ return ret;
+ }
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+
+ return 0;
+}
+
+static void xrs700x_teardown(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+
+ cancel_delayed_work_sync(&priv->mib_work);
+}
+
+static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ break;
+ case 1:
+ case 2:
+ case 3:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ /* The switch only supports full duplex. */
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+
+ switch (speed) {
+ case SPEED_1000:
+ val = XRS_PORT_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = XRS_PORT_SPEED_100;
+ break;
+ case SPEED_10:
+ val = XRS_PORT_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ regmap_fields_write(priv->ps_sel_speed, port, val);
+
+ dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
+ __func__, port, mode, speed);
+}
+
+static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
+ struct net_device *bridge, bool join)
+{
+ unsigned int i, cpu_mask = 0, mask = 0;
+ struct xrs700x *priv = ds->priv;
+ int ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+
+ cpu_mask |= BIT(i);
+
+ if (dsa_to_port(ds, i)->bridge_dev == bridge)
+ continue;
+
+ mask |= BIT(i);
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
+ if (ret)
+ return ret;
+ }
+
+ if (!join) {
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
+ cpu_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ return xrs700x_bridge_common(ds, port, bridge, true);
+}
+
+static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ xrs700x_bridge_common(ds, port, bridge, false);
+}
+
+static const struct dsa_switch_ops xrs700x_ops = {
+ .get_tag_protocol = xrs700x_get_tag_protocol,
+ .setup = xrs700x_setup,
+ .teardown = xrs700x_teardown,
+ .port_stp_state_set = xrs700x_port_stp_state_set,
+ .phylink_validate = xrs700x_phylink_validate,
+ .phylink_mac_link_up = xrs700x_mac_link_up,
+ .get_strings = xrs700x_get_strings,
+ .get_sset_count = xrs700x_get_sset_count,
+ .get_ethtool_stats = xrs700x_get_ethtool_stats,
+ .get_stats64 = xrs700x_get_stats64,
+ .port_bridge_join = xrs700x_bridge_join,
+ .port_bridge_leave = xrs700x_bridge_leave,
+};
+
+static int xrs700x_detect(struct xrs700x *priv)
+{
+ const struct xrs700x_info *info;
+ unsigned int id;
+ int ret;
+
+ ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
+ if (ret) {
+ dev_err(priv->dev, "error %d while reading switch id.\n",
+ ret);
+ return ret;
+ }
+
+ info = of_device_get_match_data(priv->dev);
+ if (!info)
+ return -EINVAL;
+
+ if (info->id == id) {
+ priv->ds->num_ports = info->num_ports;
+ dev_info(priv->dev, "%s detected.\n", info->name);
+ return 0;
+ }
+
+ dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
+ info->id, id);
+
+ return -ENODEV;
+}
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
+{
+ struct dsa_switch *ds;
+ struct xrs700x *priv;
+
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = base;
+
+ priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
+
+ ds->ops = &xrs700x_ops;
+ ds->priv = priv;
+ priv->dev = base;
+
+ priv->ds = ds;
+ priv->priv = devpriv;
+
+ return priv;
+}
+EXPORT_SYMBOL(xrs700x_switch_alloc);
+
+static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+
+ p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
+ sizeof(*p->mib_data), GFP_KERNEL);
+ if (!p->mib_data)
+ return -ENOMEM;
+
+ mutex_init(&p->mib_mutex);
+ u64_stats_init(&p->syncp);
+
+ return 0;
+}
+
+int xrs700x_switch_register(struct xrs700x *priv)
+{
+ int ret;
+ int i;
+
+ ret = xrs700x_detect(priv);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_setup_regmap_range(priv);
+ if (ret)
+ return ret;
+
+ priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
+ sizeof(*priv->ports), GFP_KERNEL);
+ if (!priv->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ ret = xrs700x_alloc_port_mib(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return dsa_register_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_register);
+
+void xrs700x_switch_remove(struct xrs700x *priv)
+{
+ dsa_unregister_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_remove);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
new file mode 100644
index 000000000000..ff62cf61b091
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+
+struct xrs700x_info {
+ unsigned int id;
+ const char *name;
+ size_t num_ports;
+};
+
+extern const struct xrs700x_info xrs7003e_info;
+extern const struct xrs700x_info xrs7003f_info;
+extern const struct xrs700x_info xrs7004e_info;
+extern const struct xrs700x_info xrs7004f_info;
+
+struct xrs700x_port {
+ struct mutex mib_mutex; /* protects mib_data */
+ u64 *mib_data;
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync syncp;
+};
+
+struct xrs700x {
+ struct dsa_switch *ds;
+ struct device *dev;
+ void *priv;
+ struct regmap *regmap;
+ struct regmap_field *ps_forward;
+ struct regmap_field *ps_management;
+ struct regmap_field *ps_sel_speed;
+ struct regmap_field *ps_cur_speed;
+ struct delayed_work mib_work;
+ struct xrs700x_port *ports;
+};
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
+int xrs700x_switch_register(struct xrs700x *priv);
+void xrs700x_switch_remove(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
new file mode 100644
index 000000000000..a5f8883af829
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+static int xrs700x_i2c_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ unsigned char buf[4];
+ int ret;
+
+ buf[0] = reg >> 23 & 0xff;
+ buf[1] = reg >> 15 & 0xff;
+ buf[2] = reg >> 7 & 0xff;
+ buf[3] = (reg & 0x7f) << 1;
+
+ ret = i2c_master_send(i2c, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(i2c, buf, 2);
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_recv returned %d\n", ret);
+ return ret;
+ }
+
+ *val = buf[0] << 8 | buf[1];
+
+ return 0;
+}
+
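
The four address bytes assembled above are simply the 31-bit register address shifted left by one and serialized big-endian, with the freed LSB carrying the read/write flag (0 = read here, 1 = write in the write path below). A stand-alone check of the framing (the register value is just an example):

#include <stdint.h>
#include <stdio.h>

static void frame_addr(uint32_t reg, int write, uint8_t buf[4])
{
        buf[0] = reg >> 23 & 0xff;              /* reg bits 30..23 */
        buf[1] = reg >> 15 & 0xff;              /* reg bits 22..15 */
        buf[2] = reg >> 7 & 0xff;               /* reg bits 14..7  */
        buf[3] = (reg & 0x7f) << 1 | write;     /* bits 6..0 + R/W */
}

int main(void)
{
        uint8_t buf[4];
        uint32_t reg = 0x300000;        /* e.g. the switch config base */

        frame_addr(reg, 0, buf);
        /* Equivalent big-endian word: (reg << 1) | rw = 0x00600000 */
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}
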
+static int xrs700x_i2c_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ unsigned char buf[6];
+ int ret;
+
+ buf[0] = reg >> 23 & 0xff;
+ buf[1] = reg >> 15 & 0xff;
+ buf[2] = reg >> 7 & 0xff;
+ buf[3] = (reg & 0x7f) << 1 | 1;
+ buf[4] = val >> 8 & 0xff;
+ buf[5] = val & 0xff;
+
+ ret = i2c_master_send(i2c, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_i2c_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_i2c_reg_read,
+ .reg_write = xrs700x_i2c_reg_write,
+ .max_register = 0,
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&i2c->dev, i2c);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&i2c->dev, NULL, &i2c->dev,
+ &xrs700x_i2c_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&i2c->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* The main DSA core may not be ready yet; registration then fails
+ * with -EPROBE_DEFER and this probe is retried later.
+ */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xrs700x_i2c_remove(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ xrs700x_switch_remove(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id xrs700x_i2c_id[] = {
+ { "xrs700x-switch", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
+
+static const struct of_device_id xrs700x_i2c_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_i2c_dt_ids);
+
+static struct i2c_driver xrs700x_i2c_driver = {
+ .driver = {
+ .name = "xrs700x-i2c",
+ .of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
+ },
+ .probe = xrs700x_i2c_probe,
+ .remove = xrs700x_i2c_remove,
+ .id_table = xrs700x_i2c_id,
+};
+
+module_i2c_driver(xrs700x_i2c_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
new file mode 100644
index 000000000000..a10ee28eb86e
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS_MDIO_IBA0 0x10
+#define XRS_MDIO_IBA1 0x11
+#define XRS_MDIO_IBD 0x14
+
+#define XRS_IB_READ 0x0
+#define XRS_IB_WRITE 0x1
+
+static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
+ return ret;
+ }
+
+ *val = (unsigned int)ret;
+
+ return 0;
+}
+
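
For reference, the indirect access protocol the function above implements, condensed into one place (this is a summary of the code shown, not an additional API):

/* Indirect read of 32-bit address 'reg' over MDIO:
 *
 *   mdiobus_write(bus, addr, XRS_MDIO_IBA1, reg >> 16);       upper half
 *   mdiobus_write(bus, addr, XRS_MDIO_IBA0,
 *                 (reg & GENMASK(15, 1)) | XRS_IB_READ);      lower half + op
 *   val = mdiobus_read(bus, addr, XRS_MDIO_IBD);              16-bit data
 *
 * A write reverses the data step: IBD is loaded with the value first,
 * then the IBA1/IBA0 sequence with XRS_IB_WRITE latches it into the
 * target register (see xrs700x_mdio_reg_write() below).
 */
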
+static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
+
+ ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_mdio_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_mdio_reg_read,
+ .reg_write = xrs700x_mdio_reg_write,
+ .max_register = XRS_VLAN(VLAN_N_VID - 1),
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&mdiodev->dev, mdiodev);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev,
+ &xrs700x_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* The main DSA core may not be ready yet; registration then fails
+ * with -EPROBE_DEFER and this probe is retried later.
+ */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ xrs700x_switch_remove(priv);
+}
+
+static const struct of_device_id xrs700x_mdio_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_mdio_dt_ids);
+
+static struct mdio_driver xrs700x_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "xrs700x-mdio",
+ .of_match_table = xrs700x_mdio_dt_ids,
+ },
+ .probe = xrs700x_mdio_probe,
+ .remove = xrs700x_mdio_remove,
+};
+
+mdio_module_driver(xrs700x_mdio_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA MDIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x_reg.h b/drivers/net/dsa/xrs700x/xrs700x_reg.h
new file mode 100644
index 000000000000..a135d4d92b6d
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_reg.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Register Base Addresses */
+#define XRS_DEVICE_ID_BASE 0x0
+#define XRS_GPIO_BASE 0x10000
+#define XRS_PORT_OFFSET 0x10000
+#define XRS_PORT_BASE(x) (0x200000 + XRS_PORT_OFFSET * (x))
+#define XRS_RTC_BASE 0x280000
+#define XRS_TS_OFFSET 0x8000
+#define XRS_TS_BASE(x) (0x290000 + XRS_TS_OFFSET * (x))
+#define XRS_SWITCH_CONF_BASE 0x300000
+
+/* Device Identification Registers */
+#define XRS_DEV_ID0 (XRS_DEVICE_ID_BASE + 0)
+#define XRS_DEV_ID1 (XRS_DEVICE_ID_BASE + 2)
+#define XRS_INT_ID0 (XRS_DEVICE_ID_BASE + 4)
+#define XRS_INT_ID1 (XRS_DEVICE_ID_BASE + 6)
+#define XRS_REV_ID (XRS_DEVICE_ID_BASE + 8)
+
+/* GPIO Registers */
+#define XRS_CONFIG0 (XRS_GPIO_BASE + 0x1000)
+#define XRS_INPUT_STATUS0 (XRS_GPIO_BASE + 0x1002)
+#define XRS_CONFIG1 (XRS_GPIO_BASE + 0x1004)
+#define XRS_INPUT_STATUS1 (XRS_GPIO_BASE + 0x1006)
+#define XRS_CONFIG2 (XRS_GPIO_BASE + 0x1008)
+#define XRS_INPUT_STATUS2 (XRS_GPIO_BASE + 0x100a)
+
+/* Port Configuration Registers */
+#define XRS_PORT_GEN_BASE(x) (XRS_PORT_BASE(x) + 0x0)
+#define XRS_PORT_HSR_BASE(x) (XRS_PORT_BASE(x) + 0x2000)
+#define XRS_PORT_PTP_BASE(x) (XRS_PORT_BASE(x) + 0x4000)
+#define XRS_PORT_CNT_BASE(x) (XRS_PORT_BASE(x) + 0x6000)
+#define XRS_PORT_IPO_BASE(x) (XRS_PORT_BASE(x) + 0x8000)
+
+/* Port Configuration Registers - General and State */
+#define XRS_PORT_STATE(x) (XRS_PORT_GEN_BASE(x) + 0x0)
+#define XRS_PORT_FORWARDING 0
+#define XRS_PORT_LEARNING 1
+#define XRS_PORT_DISABLED 2
+#define XRS_PORT_MODE_NORMAL 0
+#define XRS_PORT_MODE_MANAGEMENT 1
+#define XRS_PORT_SPEED_1000 0x12
+#define XRS_PORT_SPEED_100 0x20
+#define XRS_PORT_SPEED_10 0x30
+#define XRS_PORT_VLAN(x) (XRS_PORT_GEN_BASE(x) + 0x10)
+#define XRS_PORT_VLAN0_MAPPING(x) (XRS_PORT_GEN_BASE(x) + 0x12)
+#define XRS_PORT_FWD_MASK(x) (XRS_PORT_GEN_BASE(x) + 0x14)
+#define XRS_PORT_VLAN_PRIO(x) (XRS_PORT_GEN_BASE(x) + 0x16)
+
+/* Port Configuration Registers - HSR/PRP */
+#define XRS_HSR_CFG(x) (XRS_PORT_HSR_BASE(x) + 0x0)
+
+/* Port Configuration Registers - PTP */
+#define XRS_PTP_RX_SYNC_DELAY_NS_LO(x) (XRS_PORT_PTP_BASE(x) + 0x2)
+#define XRS_PTP_RX_SYNC_DELAY_NS_HI(x) (XRS_PORT_PTP_BASE(x) + 0x4)
+#define XRS_PTP_RX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0xa)
+#define XRS_PTP_TX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0x12)
+
+/* Port Configuration Registers - Counter */
+#define XRS_CNT_CTRL(x) (XRS_PORT_CNT_BASE(x) + 0x0)
+#define XRS_RX_GOOD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x200)
+#define XRS_RX_GOOD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x202)
+#define XRS_RX_BAD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x204)
+#define XRS_RX_BAD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x206)
+#define XRS_RX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x208)
+#define XRS_RX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x20a)
+#define XRS_RX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x20c)
+#define XRS_RX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x20e)
+#define XRS_RX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x210)
+#define XRS_RX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x212)
+#define XRS_RX_UNDERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x214)
+#define XRS_RX_UNDERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x216)
+#define XRS_RX_FRAGMENTS_L (XRS_PORT_CNT_BASE(0) + 0x218)
+#define XRS_RX_FRAGMENTS_H (XRS_PORT_CNT_BASE(0) + 0x21a)
+#define XRS_RX_OVERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x21c)
+#define XRS_RX_OVERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x21e)
+#define XRS_RX_JABBER_L (XRS_PORT_CNT_BASE(0) + 0x220)
+#define XRS_RX_JABBER_H (XRS_PORT_CNT_BASE(0) + 0x222)
+#define XRS_RX_ERR_L (XRS_PORT_CNT_BASE(0) + 0x224)
+#define XRS_RX_ERR_H (XRS_PORT_CNT_BASE(0) + 0x226)
+#define XRS_RX_CRC_L (XRS_PORT_CNT_BASE(0) + 0x228)
+#define XRS_RX_CRC_H (XRS_PORT_CNT_BASE(0) + 0x22a)
+#define XRS_RX_64_L (XRS_PORT_CNT_BASE(0) + 0x22c)
+#define XRS_RX_64_H (XRS_PORT_CNT_BASE(0) + 0x22e)
+#define XRS_RX_65_127_L (XRS_PORT_CNT_BASE(0) + 0x230)
+#define XRS_RX_65_127_H (XRS_PORT_CNT_BASE(0) + 0x232)
+#define XRS_RX_128_255_L (XRS_PORT_CNT_BASE(0) + 0x234)
+#define XRS_RX_128_255_H (XRS_PORT_CNT_BASE(0) + 0x236)
+#define XRS_RX_256_511_L (XRS_PORT_CNT_BASE(0) + 0x238)
+#define XRS_RX_256_511_H (XRS_PORT_CNT_BASE(0) + 0x23a)
+#define XRS_RX_512_1023_L (XRS_PORT_CNT_BASE(0) + 0x23c)
+#define XRS_RX_512_1023_H (XRS_PORT_CNT_BASE(0) + 0x23e)
+#define XRS_RX_1024_1536_L (XRS_PORT_CNT_BASE(0) + 0x240)
+#define XRS_RX_1024_1536_H (XRS_PORT_CNT_BASE(0) + 0x242)
+#define XRS_RX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x244)
+#define XRS_RX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x246)
+#define XRS_RX_WRONGLAN_L (XRS_PORT_CNT_BASE(0) + 0x248)
+#define XRS_RX_WRONGLAN_H (XRS_PORT_CNT_BASE(0) + 0x24a)
+#define XRS_RX_DUPLICATE_L (XRS_PORT_CNT_BASE(0) + 0x24c)
+#define XRS_RX_DUPLICATE_H (XRS_PORT_CNT_BASE(0) + 0x24e)
+#define XRS_TX_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x280)
+#define XRS_TX_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x282)
+#define XRS_TX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x284)
+#define XRS_TX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x286)
+#define XRS_TX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x288)
+#define XRS_TX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x28a)
+#define XRS_TX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x28c)
+#define XRS_TX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x28e)
+#define XRS_TX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x290)
+#define XRS_TX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x292)
+#define XRS_PRIQ_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c0)
+#define XRS_PRIQ_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c2)
+#define XRS_EARLY_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c4)
+#define XRS_EARLY_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c6)
+
+/* Port Configuration Registers - Inbound Policy 0 - 15 */
+#define XRS_ETH_ADDR_CFG(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x0)
+#define XRS_ETH_ADDR_FWD_ALLOW(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x2)
+#define XRS_ETH_ADDR_FWD_MIRROR(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x4)
+#define XRS_ETH_ADDR_0(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x8)
+#define XRS_ETH_ADDR_1(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xa)
+#define XRS_ETH_ADDR_2(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xc)
+
+/* RTC Registers */
+#define XRS_CUR_NSEC0 (XRS_RTC_BASE + 0x1004)
+#define XRS_CUR_NSEC1 (XRS_RTC_BASE + 0x1006)
+#define XRS_CUR_SEC0 (XRS_RTC_BASE + 0x1008)
+#define XRS_CUR_SEC1 (XRS_RTC_BASE + 0x100a)
+#define XRS_CUR_SEC2 (XRS_RTC_BASE + 0x100c)
+#define XRS_TIME_CC0 (XRS_RTC_BASE + 0x1010)
+#define XRS_TIME_CC1 (XRS_RTC_BASE + 0x1012)
+#define XRS_TIME_CC2 (XRS_RTC_BASE + 0x1014)
+#define XRS_STEP_SIZE0 (XRS_RTC_BASE + 0x1020)
+#define XRS_STEP_SIZE1 (XRS_RTC_BASE + 0x1022)
+#define XRS_STEP_SIZE2 (XRS_RTC_BASE + 0x1024)
+#define XRS_ADJUST_NSEC0 (XRS_RTC_BASE + 0x1034)
+#define XRS_ADJUST_NSEC1 (XRS_RTC_BASE + 0x1036)
+#define XRS_ADJUST_SEC0 (XRS_RTC_BASE + 0x1038)
+#define XRS_ADJUST_SEC1 (XRS_RTC_BASE + 0x103a)
+#define XRS_ADJUST_SEC2 (XRS_RTC_BASE + 0x103c)
+#define XRS_TIME_CMD (XRS_RTC_BASE + 0x1040)
+
+/* Time Stamper Registers */
+#define XRS_TS_CTRL(x) (XRS_TS_BASE(x) + 0x1000)
+#define XRS_TS_INT_MASK(x) (XRS_TS_BASE(x) + 0x1008)
+#define XRS_TS_INT_STATUS(x) (XRS_TS_BASE(x) + 0x1010)
+#define XRS_TS_NSEC0(x) (XRS_TS_BASE(x) + 0x1104)
+#define XRS_TS_NSEC1(x) (XRS_TS_BASE(x) + 0x1106)
+#define XRS_TS_SEC0(x) (XRS_TS_BASE(x) + 0x1108)
+#define XRS_TS_SEC1(x) (XRS_TS_BASE(x) + 0x110a)
+#define XRS_TS_SEC2(x) (XRS_TS_BASE(x) + 0x110c)
+#define XRS_PNCT0(x) (XRS_TS_BASE(x) + 0x1110)
+#define XRS_PNCT1(x) (XRS_TS_BASE(x) + 0x1112)
+
+/* Switch Configuration Registers */
+#define XRS_SWITCH_GEN_BASE (XRS_SWITCH_CONF_BASE + 0x0)
+#define XRS_SWITCH_TS_BASE (XRS_SWITCH_CONF_BASE + 0x2000)
+#define XRS_SWITCH_VLAN_BASE (XRS_SWITCH_CONF_BASE + 0x4000)
+
+/* Switch Configuration Registers - General */
+#define XRS_GENERAL (XRS_SWITCH_GEN_BASE + 0x10)
+#define XRS_GENERAL_TIME_TRAILER BIT(9)
+#define XRS_GENERAL_MOD_SYNC BIT(10)
+#define XRS_GENERAL_CUT_THRU BIT(13)
+#define XRS_GENERAL_CLR_MAC_TBL BIT(14)
+#define XRS_GENERAL_RESET BIT(15)
+#define XRS_MT_CLEAR_MASK (XRS_SWITCH_GEN_BASE + 0x12)
+#define XRS_ADDRESS_AGING (XRS_SWITCH_GEN_BASE + 0x20)
+#define XRS_TS_CTRL_TX (XRS_SWITCH_GEN_BASE + 0x28)
+#define XRS_TS_CTRL_RX (XRS_SWITCH_GEN_BASE + 0x2a)
+#define XRS_INT_MASK (XRS_SWITCH_GEN_BASE + 0x2c)
+#define XRS_INT_STATUS (XRS_SWITCH_GEN_BASE + 0x2e)
+#define XRS_MAC_TABLE0 (XRS_SWITCH_GEN_BASE + 0x200)
+#define XRS_MAC_TABLE1 (XRS_SWITCH_GEN_BASE + 0x202)
+#define XRS_MAC_TABLE2 (XRS_SWITCH_GEN_BASE + 0x204)
+#define XRS_MAC_TABLE3 (XRS_SWITCH_GEN_BASE + 0x206)
+
+/* Switch Configuration Registers - Frame Timestamp */
+#define XRS_TX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x0)
+#define XRS_TX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x2)
+#define XRS_TX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x4)
+#define XRS_TX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x6)
+#define XRS_TX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+#define XRS_RX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x200)
+#define XRS_RX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x202)
+#define XRS_RX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x204)
+#define XRS_RX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x206)
+#define XRS_RX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+
+/* Switch Configuration Registers - VLAN */
+#define XRS_VLAN(v) (XRS_SWITCH_VLAN_BASE + 0x2 * (v))
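
The counters above are 16-bit halves of 32-bit values laid out for port 0;
per-port copies sit XRS_PORT_OFFSET apart. A hedged sketch of assembling
one counter, assuming the _H register carries the upper 16 bits (the
helper name is hypothetical):

	#include <linux/regmap.h>
	#include <linux/types.h>

	/* Hypothetical: read a 32-bit counter for @port given the port-0
	 * address of its low half (e.g. XRS_RX_GOOD_OCTETS_L); the high
	 * half lives 2 bytes above it.
	 */
	static int xrs700x_read_counter32(struct xrs700x *priv, int port,
					  unsigned int reg_l, u32 *val)
	{
		unsigned int lo, hi;
		int ret;

		ret = regmap_read(priv->regmap,
				  reg_l + XRS_PORT_OFFSET * port, &lo);
		if (ret)
			return ret;

		ret = regmap_read(priv->regmap,
				  reg_l + XRS_PORT_OFFSET * port + 2, &hi);
		if (ret)
			return ret;

		*val = (u32)hi << 16 | lo;
		return 0;
	}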
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 06596fa1f9fe..1db6cfd2b55c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1585,10 +1585,9 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
int ret;
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- xdp->data = page_address(rx_info->page) + rx_info->page_offset;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_hard_start = page_address(rx_info->page);
- xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ xdp_prepare_buff(xdp, page_address(rx_info->page),
+ rx_info->page_offset,
+ rx_ring->ena_bufs[0].len, false);
/* If for some reason we received a bigger packet than
* we expect, then we simply drop it
*/
@@ -1634,8 +1633,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
- xdp.rxq = &rx_ring->xdp_rxq;
- xdp.frame_sz = ENA_PAGE_SIZE;
+ xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
xdp_verdict = XDP_PASS;
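
The xdp_init_buff()/xdp_prepare_buff() pair this series converts drivers
to is a thin wrapper over the fields previously set by hand; the helpers
in include/net/xdp.h amount to roughly the following (quoted from memory
of the corresponding core change, so treat it as a sketch rather than the
authoritative definition):

	static __always_inline void
	xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz,
		      struct xdp_rxq_info *rxq)
	{
		xdp->frame_sz = frame_sz;
		xdp->rxq = rxq;
	}

	static __always_inline void
	xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
			 int headroom, int data_len, const bool meta_valid)
	{
		unsigned char *data = hard_start + headroom;

		xdp->data_hard_start = hard_start;
		xdp->data = data;
		xdp->data_end = data + data_len;
		/* data_meta == data + 1 marks the metadata as invalid,
		 * matching the old xdp_set_data_meta_invalid().
		 */
		xdp->data_meta = meta_valid ? data : data + 1;
	}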
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2709a2db5657..99b6d5a9f1d9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2295,8 +2295,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_setup_tc = xgbe_setup_tc,
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = xgbe_features_check,
};
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7b79528d6eed..4bdf8fbe75a6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -174,7 +174,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on OF
select BGMAC
select PHYLIB
select FIXED_PHY
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 916824cca3fd..fd8767213165 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
/*
* refill rx queue
*/
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
struct bcm_enet_priv *priv;
@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
- struct sk_buff *skb;
- dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
- if (!priv->rx_skb[desc_idx]) {
- skb = netdev_alloc_skb(dev, priv->rx_skb_size);
- if (!skb)
+ if (!priv->rx_buf[desc_idx]) {
+ void *buf;
+
+ if (likely(napi_mode))
+ buf = napi_alloc_frag(priv->rx_frag_size);
+ else
+ buf = netdev_alloc_frag(priv->rx_frag_size);
+ if (unlikely(!buf))
break;
- priv->rx_skb[desc_idx] = skb;
- p = dma_map_single(&priv->pdev->dev, skb->data,
- priv->rx_skb_size,
- DMA_FROM_DEVICE);
- desc->address = p;
+ priv->rx_buf[desc_idx] = buf;
+ desc->address = dma_map_single(&priv->pdev->dev,
+ buf + priv->rx_buf_offset,
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
}
- len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+ len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, false);
spin_unlock(&priv->rx_lock);
}
@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
struct bcm_enet_priv *priv;
+ struct list_head rx_list;
struct device *kdev;
int processed;
priv = netdev_priv(dev);
+ INIT_LIST_HEAD(&rx_list);
kdev = &priv->pdev->dev;
processed = 0;
@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
int desc_idx;
u32 len_stat;
unsigned int len;
+ void *buf;
desc_idx = priv->rx_curr_desc;
desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_curr_desc++;
if (priv->rx_curr_desc == priv->rx_ring_size)
priv->rx_curr_desc = 0;
- priv->rx_desc_count--;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
/* valid packet */
- skb = priv->rx_skb[desc_idx];
+ buf = priv->rx_buf[desc_idx];
len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
/* don't include FCS */
len -= 4;
if (len < copybreak) {
- struct sk_buff *nskb;
-
- nskb = napi_alloc_skb(&priv->napi, len);
- if (!nskb) {
+ skb = napi_alloc_skb(&priv->napi, len);
+ if (unlikely(!skb)) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
continue;
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
dma_sync_single_for_cpu(kdev, desc->address,
len, DMA_FROM_DEVICE);
- memcpy(nskb->data, skb->data, len);
+ memcpy(skb->data, buf + priv->rx_buf_offset, len);
dma_sync_single_for_device(kdev, desc->address,
len, DMA_FROM_DEVICE);
- skb = nskb;
} else {
- dma_unmap_single(&priv->pdev->dev, desc->address,
- priv->rx_skb_size, DMA_FROM_DEVICE);
- priv->rx_skb[desc_idx] = NULL;
+ dma_unmap_single(kdev, desc->address,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ priv->rx_buf[desc_idx] = NULL;
+
+ skb = build_skb(buf, priv->rx_frag_size);
+ if (unlikely(!skb)) {
+ skb_free_frag(buf);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, priv->rx_buf_offset);
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- netif_receive_skb(skb);
+ list_add_tail(&skb->list, &rx_list);
- } while (--budget > 0);
+ } while (processed < budget);
+
+ netif_receive_skb_list(&rx_list);
+ priv->rx_desc_count -= processed;
if (processed || !priv->rx_desc_count) {
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, true);
/* kick rx dma */
enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
struct bcm_enet_priv *priv;
+ unsigned int bytes;
int released;
priv = netdev_priv(dev);
+ bytes = 0;
released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
+ bytes += skb->len;
dev_kfree_skb(skb);
released++;
}
+ netdev_completed_queue(dev, released, bytes);
+
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
@@ -622,8 +640,11 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc->len_stat = len_stat;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
/* kick tx dma */
- enet_dmac_writel(priv, priv->dma_chan_en_mask,
+ if (!netdev_xmit_more() || !priv->tx_desc_count)
+ enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->tx_chan);
/* stop queue if no more desc available */
@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
priv->pause_tx ? "tx" : "off");
}
+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_buf[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(priv->rx_buf[i]);
+ }
+ kfree(priv->rx_buf);
+}
+
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
+ if (!priv->rx_buf) {
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMAC_BUFALLOC, priv->rx_chan);
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -1159,12 +1187,12 @@ static int bcm_enet_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
@@ -1186,21 +1214,10 @@ static int bcm_enet_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1622,9 +1639,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
* align rx buffer size to dma burst len, account FCS since
* it's appended
*/
- priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+ priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
+ priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
dev->mtu = new_mtu;
return 0;
}
@@ -1709,6 +1729,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD;
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
@@ -2126,7 +2147,7 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ dev_err(kdev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -2136,11 +2157,11 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (!priv->rx_buf) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -2187,8 +2208,8 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMA_BUFALLOC_REG(priv->rx_chan));
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -2287,18 +2308,7 @@ static int bcm_enetsw_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -2327,13 +2337,13 @@ static int bcm_enetsw_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
del_timer_sync(&priv->swphy_poll);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
del_timer_sync(&priv->rx_timeout);
@@ -2348,21 +2358,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2659,6 +2658,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
pd = dev_get_platdata(&pdev->dev);
if (pd) {
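
Besides the page-frag conversion, the bcm63xx_enet hunks above wire up
byte queue limits (BQL): netdev_sent_queue() on transmit,
netdev_completed_queue() on reclaim, netdev_reset_queue() on stop. The
generic pattern, sketched outside any particular driver (function names
are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* TX path: account bytes as descriptors are posted; with BQL in
	 * place the doorbell may be deferred while netdev_xmit_more()
	 * says more frames are queued behind this one.
	 */
	static void example_tx_account(struct net_device *dev,
				       struct sk_buff *skb)
	{
		netdev_sent_queue(dev, skb->len);
	}

	/* TX completion: report reclaimed work so BQL can adapt. */
	static void example_tx_complete(struct net_device *dev,
					unsigned int pkts, unsigned int bytes)
	{
		netdev_completed_queue(dev, pkts, bytes);
	}

	/* ndo_stop: drop the accounting before tearing down the rings. */
	static void example_stop(struct net_device *dev)
	{
		netdev_reset_queue(dev);
	}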
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 1d3c917eb830..78f1830fb3cb 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -230,11 +230,17 @@ struct bcm_enet_priv {
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
- /* size of allocated rx skbs */
- unsigned int rx_skb_size;
+ /* size of allocated rx buffers */
+ unsigned int rx_buf_size;
- /* list of skb given to hw for rx */
- struct sk_buff **rx_skb;
+ /* allocated rx buffer offset */
+ unsigned int rx_buf_offset;
+
+ /* size of allocated rx frag */
+ unsigned int rx_frag_size;
+
+ /* list of buffer given to hw for rx */
+ void **rx_buf;
/* used when rx buffer allocation fails, so we defer rx queue
* refill */
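
The rx_skb -> rx_buf rename reflects the usual page-frag receive scheme:
the ring holds raw frags, and an skb is built around a frag only once a
complete frame arrives. A condensed sketch under hypothetical names:

	#include <linux/skbuff.h>

	/* Hypothetical: wrap one received frame of @len bytes located at
	 * @buf + @offset, where @frag_size was the napi_alloc_frag() size.
	 */
	static struct sk_buff *example_frag_to_skb(void *buf,
						   unsigned int offset,
						   unsigned int len,
						   unsigned int frag_size)
	{
		struct sk_buff *skb;

		skb = build_skb(buf, frag_size);	/* no data copy */
		if (unlikely(!skb)) {
			skb_free_frag(buf);		/* return the frag */
			return NULL;
		}

		skb_reserve(skb, offset);	/* skip the headroom */
		skb_put(skb, len);		/* expose the payload */
		return skb;
	}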
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0404aafd5ce5..777bbf6d2586 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -2310,33 +2311,22 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_select_queue = bcm_sysport_select_queue,
};
-static int bcm_sysport_map_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_map_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
unsigned int q, qp, port;
- struct net_device *dev;
-
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
/* We can't set up queue inspection for switches that are not directly
* attached
*/
- if (info->switch_number)
+ if (dp->ds->index)
return 0;
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
* 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
@@ -2376,27 +2366,16 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_unmap_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_unmap_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
- struct net_device *dev;
unsigned int q, qp, port;
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
-
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
num_tx_queues = slave_dev->real_num_tx_queues;
@@ -2417,17 +2396,30 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bcm_sysport_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- int ret = NOTIFY_DONE;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct bcm_sysport_priv *priv;
+ int ret = 0;
+
+ priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
+ if (priv->netdev != dev)
+ return NOTIFY_DONE;
switch (event) {
- case DSA_PORT_REGISTER:
- ret = bcm_sysport_map_queues(nb, ptr);
- break;
- case DSA_PORT_UNREGISTER:
- ret = bcm_sysport_unmap_queues(nb, ptr);
+ case NETDEV_CHANGEUPPER:
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return NOTIFY_DONE;
+
+ if (!dsa_slave_dev_check(info->upper_dev))
+ return NOTIFY_DONE;
+
+ if (info->linking)
+ ret = bcm_sysport_map_queues(dev, info->upper_dev);
+ else
+ ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
break;
}
@@ -2602,9 +2594,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rx_max_coalesced_frames = 1;
u64_stats_init(&priv->syncp);
- priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+ priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
- ret = register_dsa_notifier(&priv->dsa_notifier);
+ ret = register_netdevice_notifier(&priv->netdev_notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register DSA notifier\n");
goto err_deregister_fixed_link;
@@ -2631,7 +2623,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_notifier:
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
@@ -2649,7 +2641,7 @@ static int bcm_sysport_remove(struct platform_device *pdev)
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
unregister_netdev(dev);
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
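
The hunk above cuts off before the handler's return; generic netdevice
notifiers conventionally translate an errno into notifier space on exit.
A sketch of that tail (the function name is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int example_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
	{
		int ret = 0;

		/* ... NETDEV_CHANGEUPPER handling that may set ret ... */

		/* 0 maps to NOTIFY_DONE; a negative errno is encoded
		 * into NOTIFY_BAD for the notifier chain.
		 */
		return notifier_from_errno(ret);
	}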
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 3a5cb6f128f5..984f76e74b43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/dim.h>
+#include "unimac.h"
+
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
#define DESC_ADDR_HI_SHIFT 0
@@ -213,39 +215,6 @@ struct bcm_rsb {
/* UniMAC offset and defines */
#define SYS_PORT_UMAC_OFFSET 0x800
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_10 0
-#define CMD_SPEED_100 1
-#define CMD_SPEED_1000 2
-#define CMD_SPEED_2500 3
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00c
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
@@ -787,7 +756,7 @@ struct bcm_sysport_priv {
struct u64_stats_sync syncp;
/* map information between switch port queues and local queues */
- struct notifier_block dsa_notifier;
+ struct notifier_block netdev_notifier;
unsigned int per_port_num_tx_queues;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 98ec1b8a7d8e..075f6e146b29 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -746,25 +746,25 @@ error:
/* TODO: can we just drop @force? Can we avoid resetting the MAC entirely
* if there is nothing to change? Try it once the driver has stabilized.
*/
-static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
- bool force)
+static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
{
- u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
u32 new_val = (cmdcfg & mask) | set;
u32 cmdcfg_sr;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
- bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+ bgmac_umac_write(bgmac, UMAC_CMD, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
udelay(2);
}
@@ -773,9 +773,9 @@ static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
u32 tmp;
tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
- bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
tmp = (addr[4] << 8) | addr[5];
- bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}
static void bgmac_set_rx_mode(struct net_device *net_dev)
@@ -783,9 +783,9 @@ static void bgmac_set_rx_mode(struct net_device *net_dev)
struct bgmac *bgmac = netdev_priv(net_dev);
if (net_dev->flags & IFF_PROMISC)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}
#if 0 /* We don't use these regs yet */
@@ -825,21 +825,21 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
- u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
u32 set = 0;
switch (bgmac->mac_speed) {
case SPEED_10:
- set |= BGMAC_CMDCFG_ES_10;
+ set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
break;
case SPEED_100:
- set |= BGMAC_CMDCFG_ES_100;
+ set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
break;
case SPEED_1000:
- set |= BGMAC_CMDCFG_ES_1000;
+ set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
break;
case SPEED_2500:
- set |= BGMAC_CMDCFG_ES_2500;
+ set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
break;
default:
dev_err(bgmac->dev, "Unsupported speed: %d\n",
@@ -847,9 +847,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
}
if (bgmac->mac_duplex == DUPLEX_HALF)
- set |= BGMAC_CMDCFG_HD;
+ set |= CMD_HD_EN;
- bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+ bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
@@ -917,7 +917,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
udelay(1);
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
@@ -986,34 +986,34 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
- * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
- * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
+ * The specs don't mention using UMAC_CMD_SR, but in this routine
+ * UMAC_CMD is read _after_ putting the chip in reset, so it has to
* be kept until the MAC is taken out of reset.
*/
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
-
- bgmac_cmdcfg_maskset(bgmac,
- ~(BGMAC_CMDCFG_TE |
- BGMAC_CMDCFG_RE |
- BGMAC_CMDCFG_RPI |
- BGMAC_CMDCFG_TAI |
- BGMAC_CMDCFG_HD |
- BGMAC_CMDCFG_ML |
- BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_RL |
- BGMAC_CMDCFG_RED |
- BGMAC_CMDCFG_PE |
- BGMAC_CMDCFG_TPI |
- BGMAC_CMDCFG_PAD_EN |
- BGMAC_CMDCFG_PF),
- BGMAC_CMDCFG_PROM |
- BGMAC_CMDCFG_NLC |
- BGMAC_CMDCFG_CFE |
- cmdcfg_sr,
- false);
+ cmdcfg_sr = CMD_SW_RESET_OLD;
+
+ bgmac_umac_cmd_maskset(bgmac,
+ ~(CMD_TX_EN |
+ CMD_RX_EN |
+ CMD_RX_PAUSE_IGNORE |
+ CMD_TX_ADDR_INS |
+ CMD_HD_EN |
+ CMD_LCL_LOOP_EN |
+ CMD_CNTL_FRM_EN |
+ CMD_RMT_LOOP_EN |
+ CMD_RX_ERR_DISC |
+ CMD_PRBL_EN |
+ CMD_TX_PAUSE_IGNORE |
+ CMD_PAD_EN |
+ CMD_PAUSE_FWD),
+ CMD_PROMISC |
+ CMD_NO_LEN_CHK |
+ CMD_CNTL_FRM_EN |
+ cmdcfg_sr,
+ false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
@@ -1049,16 +1049,16 @@ static void bgmac_enable(struct bgmac *bgmac)
u32 mode;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
- bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- cmdcfg_sr, true);
+ cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
+ bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
+ cmdcfg_sr, true);
udelay(2);
- cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
- bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+ cmdcfg |= CMD_TX_EN | CMD_RX_EN;
+ bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
@@ -1078,7 +1078,7 @@ static void bgmac_enable(struct bgmac *bgmac)
fl_ctl = 0x03cb04cb;
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
- bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
}
if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
@@ -1105,18 +1105,18 @@ static void bgmac_chip_init(struct bgmac *bgmac)
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);
bgmac_set_rx_mode(bgmac->net_dev);
bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
if (bgmac->loopback)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);
bgmac_chip_intrs_on(bgmac);
@@ -1252,7 +1252,7 @@ static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 351c598a3ec6..110088e662ea 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -4,6 +4,8 @@
#include <linux/netdevice.h>
+#include "unimac.h"
+
#define BGMAC_DEV_CTL 0x000
#define BGMAC_DC_TSM 0x00000002
#define BGMAC_DC_CFCO 0x00000004
@@ -169,47 +171,7 @@
#define BGMAC_RX_NONPAUSE_PKTS 0x420
#define BGMAC_RX_SACHANGES 0x424
#define BGMAC_RX_UNI_PKTS 0x428
-#define BGMAC_UNIMAC_VERSION 0x800
-#define BGMAC_HDBKP_CTL 0x804
-#define BGMAC_CMDCFG 0x808 /* Configuration */
-#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
-#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
-#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
-#define BGMAC_CMDCFG_ES_10 0x00000000
-#define BGMAC_CMDCFG_ES_100 0x00000004
-#define BGMAC_CMDCFG_ES_1000 0x00000008
-#define BGMAC_CMDCFG_ES_2500 0x0000000C
-#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
-#define BGMAC_CMDCFG_PAD_EN 0x00000020
-#define BGMAC_CMDCFG_CF 0x00000040
-#define BGMAC_CMDCFG_PF 0x00000080
-#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
-#define BGMAC_CMDCFG_TAI 0x00000200
-#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
-#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
-#define BGMAC_CMDCFG_AE 0x00400000
-#define BGMAC_CMDCFG_CFE 0x00800000
-#define BGMAC_CMDCFG_NLC 0x01000000
-#define BGMAC_CMDCFG_RL 0x02000000
-#define BGMAC_CMDCFG_RED 0x04000000
-#define BGMAC_CMDCFG_PE 0x08000000
-#define BGMAC_CMDCFG_TPI 0x10000000
-#define BGMAC_CMDCFG_AT 0x20000000
-#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
-#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
-#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
-#define BGMAC_PAUSEQUANTA 0x818
-#define BGMAC_MAC_MODE 0x844
-#define BGMAC_OUTERTAG 0x848
-#define BGMAC_INNERTAG 0x84c
-#define BGMAC_TXIPG 0x85c
-#define BGMAC_PAUSE_CTL 0xb30
-#define BGMAC_TX_FLUSH 0xb34
-#define BGMAC_RX_STATUS 0xb38
-#define BGMAC_TX_STATUS 0xb3c
+#define BGMAC_UNIMAC 0x800
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
@@ -556,6 +518,16 @@ static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
bgmac->write(bgmac, offset, value);
}
+static inline u32 bgmac_umac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac_read(bgmac, BGMAC_UNIMAC + offset);
+}
+
+static inline void bgmac_umac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac_write(bgmac, BGMAC_UNIMAC + offset, value);
+}
+
static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
{
return bgmac->idm_read(bgmac, offset);
@@ -609,6 +581,11 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
bgmac_maskset(bgmac, offset, ~0, set);
}
+static inline void bgmac_umac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, u32 set)
+{
+ bgmac_maskset(bgmac, BGMAC_UNIMAC + offset, mask, set);
+}
+
static inline int bgmac_phy_connect(struct bgmac *bgmac)
{
return bgmac->phy_connect(bgmac);
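
The new bgmac_umac_*() accessors are pure offset shims; the
read-modify-write they forward to keeps the usual (old & mask) | set
semantics:

	#include <linux/types.h>

	/* Sketch of the semantics behind bgmac_umac_maskset() and
	 * bgmac_umac_cmd_maskset(): keep the bits inside @mask, OR in @set.
	 */
	static inline u32 maskset(u32 old, u32 mask, u32 set)
	{
		return (old & mask) | set;
	}

	/* e.g. enable promiscuous mode:  maskset(cmd, ~0, CMD_PROMISC);
	 *      disable it:               maskset(cmd, ~CMD_PROMISC, 0);
	 */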
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28069b290862..b652ed72a621 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13071,8 +13071,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d10e4f85dd11..d31a5ad7522a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8603,7 +8603,7 @@ msix_setup_exit:
static int bnxt_init_inta(struct bnxt *bp)
{
- bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
@@ -12091,8 +12091,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index fcc262064766..641303894341 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -133,12 +133,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
txr = rxr->bnapi->tx_ring;
- xdp.data_hard_start = *data_ptr - offset;
- xdp.data = *data_ptr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = *data_ptr + *len;
- xdp.rxq = &rxr->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
+ xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index f6ca01da141d..0a6d91b0f0aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,8 @@
#include <linux/dim.h>
#include <linux/ethtool.h>
+#include "../unimac.h"
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -150,63 +152,6 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
-#define UMAC_HD_BKP_CTRL 0x004
-#define HD_FC_EN (1 << 0)
-#define HD_FC_BKOFF_OK (1 << 1)
-#define IPG_CONFIG_RX_SHIFT 2
-#define IPG_CONFIG_RX_MASK 0x1F
-
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define UMAC_SPEED_10 0
-#define UMAC_SPEED_100 1
-#define UMAC_SPEED_1000 2
-#define UMAC_SPEED_2500 3
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00C
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_MODE 0x44
-#define MODE_LINK_STATUS (1 << 5)
-
-#define UMAC_EEE_CTRL 0x064
-#define EN_LPI_RX_PAUSE (1 << 0)
-#define EN_LPI_TX_PFC (1 << 1)
-#define EN_LPI_TX_PAUSE (1 << 2)
-#define EEE_EN (1 << 3)
-#define RX_FIFO_CHECK (1 << 4)
-#define EEE_TX_CLK_DIS (1 << 5)
-#define DIS_EEE_10M (1 << 6)
-#define LP_IDLE_PREDICTION_MODE (1 << 7)
-
-#define UMAC_EEE_LPI_TIMER 0x068
-#define UMAC_EEE_WAKE_TIMER 0x06C
-#define UMAC_EEE_REF_COUNT 0x070
-#define EEE_REFERENCE_COUNT_MASK 0xffff
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6fb6c3556285..17f997ef950f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -63,11 +63,11 @@ void bcmgenet_mii_setup(struct net_device *dev)
/* speed */
if (phydev->speed == SPEED_1000)
- cmd_bits = UMAC_SPEED_1000;
+ cmd_bits = CMD_SPEED_1000;
else if (phydev->speed == SPEED_100)
- cmd_bits = UMAC_SPEED_100;
+ cmd_bits = CMD_SPEED_100;
else
- cmd_bits = UMAC_SPEED_10;
+ cmd_bits = CMD_SPEED_10;
cmd_bits <<= CMD_SPEED_SHIFT;
/* duplex */
diff --git a/drivers/net/ethernet/broadcom/unimac.h b/drivers/net/ethernet/broadcom/unimac.h
new file mode 100644
index 000000000000..585a85286257
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/unimac.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __UNIMAC_H
+#define __UNIMAC_H
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET_OLD (1 << 11)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_RX_ERR_DISC (1 << 26)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_PAUSE_QUANTA 0x018
+#define UMAC_MODE 0x044
+#define MODE_LINK_STATUS (1 << 5)
+#define UMAC_FRM_TAG0 0x048 /* outer tag */
+#define UMAC_FRM_TAG1 0x04c /* inner tag */
+#define UMAC_TX_IPG_LEN 0x05c
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+#define UMAC_RX_IPG_INV 0x078
+#define UMAC_MACSEC_PROG_TX_CRC 0x310
+#define UMAC_MACSEC_CTRL 0x314
+#define UMAC_PAUSE_CTRL 0x330
+#define UMAC_TX_FLUSH 0x334
+#define UMAC_RX_FIFO_STATUS 0x338
+#define UMAC_TX_FIFO_STATUS 0x33c
+
+#endif
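
With the speed encodings now shared in unimac.h, programming a link speed
is a plain field insert into UMAC_CMD, mirroring bgmac_mac_speed() above.
A hedged sketch (the helper is hypothetical):

	#include <linux/types.h>
	#include "unimac.h"

	/* Compose UMAC_CMD for 1 Gb/s full duplex: clear the old
	 * speed/duplex bits, then insert the new encoding.
	 */
	static u32 umac_cmd_speed_1g(u32 cmd)
	{
		cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
		cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
		return cmd;
	}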
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 37d064193f0f..2a0d64e5797c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1163,7 +1163,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
oct->flags |= LIO_FLAG_MSI_ENABLED;
/* allocate storage for the names assigned to the irq */
- oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
if (!oct->irq_name_storage)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7d00d3a8ded4..7c5af4beedc6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3219,8 +3219,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_set_vf_mac = liquidio_set_vf_mac,
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 103440f97bc8..516f166ceff8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1879,8 +1879,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 387a57cbfb73..e159194d0aef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -545,7 +545,7 @@ static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
-static spinlock_t octeon_devices_lock;
+static DEFINE_SPINLOCK(octeon_devices_lock);
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -563,7 +563,6 @@ void octeon_init_device_list(int conf_type)
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
- spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f3b7b443f964..c33b4e837515 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -530,6 +530,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
struct rcv_queue *rq, struct sk_buff **skb)
{
+ unsigned char *hard_start, *data;
struct xdp_buff xdp;
struct page *page;
u32 action;
@@ -547,12 +548,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
- xdp.data = (void *)cpu_addr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ &rq->xdp_rxq);
+ hard_start = page_address(page);
+ data = (unsigned char *)cpu_addr;
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7fd264a6d085..15542661e3d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3882,8 +3882,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 196652a114c5..550cc065649f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1600,7 +1600,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1832,6 +1833,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct adapter *adapter;
int qidx, credits, ret;
size_t fw_hdr_copy_len;
+ unsigned int chip_ver;
u64 cntrl, *end;
u32 wr_mid;
@@ -1896,6 +1898,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
goto out_free;
}
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/* After we're done injecting the Work Request for this
@@ -1907,7 +1910,8 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -1960,7 +1964,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
cpl = (void *)(lso + 1);
- if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ if (chip_ver <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
@@ -3598,6 +3602,25 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}
txq = &s->ethtxq[pi->first_qset + rspq->idx];
+
+ /* We've got the Hardware Consumer Index Update in the Egress Update
+ * message. These Egress Update messages will be our sole CIDX Updates
+ * we get since we don't want to chew up PCIe bandwidth for both Ingress
+ * Messages and Status Page writes. However, the code which manages
+ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+ * stored in the Status Page at the end of the TX Queue. It's easiest
+ * to simply copy the CIDX Update value from the Egress Update message
+ * to the Status Page. Also note that no Endian issues need to be
+ * considered here since both are Big Endian and we're just copying
+ * bytes consistently ...
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ struct cpl_sge_egr_update *egr;
+
+ egr = (struct cpl_sge_egr_update *)rsp;
+ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+ }
+
t4_sge_eth_txq_egress_update(adapter, txq, -1);
}
@@ -4583,11 +4606,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
+ FW_EQ_ETH_CMD_AUTOEQUIQE_F :
+ FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
+ HOSTFCMODE_INGRESS_QUEUE_X :
+ HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
@@ -4598,6 +4625,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
: FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 47d9268a7e3c..585590520076 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -92,9 +92,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
static struct cxgb4_uld_info ch_ipsec_uld_info = {
.name = CHIPSEC_DRV_MODULE_NAME,
- .nrxq = MAX_ULD_QSETS,
- /* Max ntxq will be derived from fw config file*/
- .rxq_size = 1024,
.add = ch_ipsec_uld_add,
.state_change = ch_ipsec_uld_state_change,
.tx_handler = ch_ipsec_xmit,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index fb269d587b74..f04ec53544ae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2509,8 +2509,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
@@ -2535,8 +2533,6 @@ static const struct net_device_ops enic_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d402d83d9edd..b6eba29d8e99 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5179,8 +5179,6 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = be_features_check,
.ndo_get_phys_port_id = be_get_phys_port_id,
};
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4360ce4d3fb6..d8e568f6caf3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2532,12 +2532,10 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
return XDP_PASS;
}
- xdp.data = vaddr + fd_off;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + qm_fd_get_length(fd);
- xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
- xdp.rxq = &dpaa_fq->xdp_rxq;
+ xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+ &dpaa_fq->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
/* We reserve a fixed headroom of 256 bytes under the erratum and we
* offer it all to XDP programs to use. If no room is left for the
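
This same open-coded-to-helper conversion repeats in the dpaa2, i40e, ice, igb, ixgbe, ixgbevf, mvneta and mvpp2 hunks below. For reference, the two helpers are roughly the following (a sketch matching include/net/xdp.h at this point in the series):

	static inline void xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz,
					 struct xdp_rxq_info *rxq)
	{
		xdp->frame_sz = frame_sz;
		xdp->rxq = rxq;
	}

	static inline void xdp_prepare_buff(struct xdp_buff *xdp,
					    unsigned char *hard_start,
					    int headroom, int data_len,
					    const bool meta_valid)
	{
		unsigned char *data = hard_start + headroom;

		xdp->data_hard_start = hard_start;
		xdp->data = data;
		xdp->data_end = data + data_len;
		/* "no metadata" is encoded as data + 1 */
		xdp->data_meta = meta_valid ? data : data + 1;
	}
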
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index fb0bcd18ec0c..41e225baf571 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -350,7 +350,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
- int err;
+ int err, offset;
rcu_read_lock();
@@ -358,14 +358,10 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
if (!xdp_prog)
goto out;
- xdp.data = vaddr + dpaa2_fd_get_offset(fd);
- xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.rxq = &ch->xdp_rxq;
-
- xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
- (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1262,6 +1258,22 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1691,7 +1703,7 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
*/
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
goto out;
	/* Check link state; speed / duplex changes are not treated yet */
@@ -1730,7 +1742,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- if (!priv->mac) {
+ if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
* make sure we don't race against the link up notification,
* which may come immediately after dpni_enable();
@@ -1752,7 +1764,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
phylink_start(priv->mac->phylink);
return 0;
@@ -1826,11 +1838,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- if (!priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ } else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- } else {
- phylink_stop(priv->mac->phylink);
}
/* On dpni_disable(), the MC firmware will:
@@ -1952,6 +1964,43 @@ static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
}
}
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -2058,6 +2107,13 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
bool enable;
int err;
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
err = dpaa2_eth_set_rx_csum(priv, enable);
@@ -2115,7 +2171,7 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
return -EOPNOTSUPP;
@@ -2507,6 +2563,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
.ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -4015,6 +4073,9 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC;
net_dev->hw_features = net_dev->features;
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
return 0;
}
@@ -4042,10 +4103,11 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
- return 0;
- if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
@@ -4056,23 +4118,38 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
mac->mc_io = priv->mc_io;
mac->net_dev = priv->net_dev;
- err = dpaa2_mac_connect(mac);
- if (err) {
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
- kfree(mac);
- return err;
- }
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
priv->mac = mac;
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ goto err_close_mac;
+ }
+ }
+
return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
}
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (!priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
return;
- dpaa2_mac_disconnect(priv->mac);
+ dpaa2_mac_close(priv->mac);
kfree(priv->mac);
priv->mac = NULL;
}
@@ -4101,7 +4178,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d236b8695c39..c3d456c45102 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -693,6 +693,19 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac && priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY)
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return priv->mac ? true : false;
+}
+
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index f981a523e13a..bf59708b869e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,7 +85,7 @@ static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_nway_reset(priv->mac->phylink);
return -EOPNOTSUPP;
@@ -97,7 +97,7 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_ksettings_get(priv->mac->phylink,
link_settings);
@@ -115,7 +115,7 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (!priv->mac)
+ if (!dpaa2_eth_is_type_phy(priv))
return -ENOTSUPP;
return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
@@ -127,7 +127,7 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
- if (priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
return;
}
@@ -150,7 +150,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_set_pauseparam(priv->mac->phylink,
pause);
if (pause->autoneg)
@@ -198,7 +198,7 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_strings(p);
break;
}
@@ -211,7 +211,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
num_ss_stats += dpaa2_mac_get_sset_count();
return num_ss_stats;
default:
@@ -313,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
*(data + i++) = buf_cnt;
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 828c177df03d..69ad869446cf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -174,30 +174,22 @@ static void dpaa2_mac_link_up(struct phylink_config *config,
dpmac_state->up = 1;
- if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
- /* If the DPMAC is configured for PHY mode, we need
- * to pass the link parameters to the MC firmware.
- */
- dpmac_state->rate = speed;
-
- if (duplex == DUPLEX_HALF)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else if (duplex == DUPLEX_FULL)
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
-
- /* This is lossy; the firmware really should take the pause
- * enablement status rather than pause/asym pause status.
- */
- if (rx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (rx_pause ^ tx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
- }
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
@@ -228,32 +220,6 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.mac_link_down = dpaa2_mac_link_down,
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io)
-{
- struct dpmac_attr attr;
- bool fixed = false;
- u16 mc_handle = 0;
- int err;
-
- err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
- &mc_handle);
- if (err || !mc_handle)
- return false;
-
- err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
- if (err)
- goto out;
-
- if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
- fixed = true;
-
-out:
- dpmac_close(mc_io, 0, mc_handle);
-
- return fixed;
-}
-
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
struct device_node *dpmac_node, int id)
{
@@ -302,36 +268,20 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
- struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
struct device_node *dpmac_node;
struct phylink *phylink;
- struct dpmac_attr attr;
int err;
- err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
- &dpmac_dev->mc_handle);
- if (err || !dpmac_dev->mc_handle) {
- netdev_err(net_dev, "dpmac_open() = %d\n", err);
- return -ENODEV;
- }
-
- err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
- if (err) {
- netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
- goto err_close_dpmac;
- }
+ mac->if_link_type = mac->attr.link_type;
- mac->if_link_type = attr.link_type;
-
- dpmac_node = dpaa2_mac_get_node(attr.id);
+ dpmac_node = dpaa2_mac_get_node(mac->attr.id);
if (!dpmac_node) {
- netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
- err = -ENODEV;
- goto err_close_dpmac;
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
}
- err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
if (err < 0) {
err = -EINVAL;
goto err_put_node;
@@ -351,9 +301,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
- if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
- attr.eth_if != DPMAC_ETH_IF_RGMII) {
- err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if (mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
if (err)
goto err_put_node;
}
@@ -389,8 +339,7 @@ err_pcs_destroy:
dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
-err_close_dpmac:
- dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+
return err;
}
@@ -402,8 +351,40 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ int err;
- dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ return 0;
+
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
}
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 955a52856210..13d42dd58ec9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,7 @@ struct dpaa2_mac {
struct dpmac_link_state state;
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -28,6 +29,10 @@ struct dpaa2_mac {
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
struct fsl_mc_io *mc_io);
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
int dpaa2_mac_connect(struct dpaa2_mac *mac);
void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 90453dc7baef..9f80bdfeedec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -62,6 +62,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
@@ -662,4 +666,17 @@ struct dpni_rsp_single_step_cfg {
__le32 peer_delay;
};
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
#endif /* _FSL_DPNI_CMD_H */
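
dpni_set_field(), used by the new dpni_enable_vlan_filter() below, is the usual shift-and-mask helper from this header; roughly (a sketch of the existing macros, not new code):

	#define DPNI_MASK(field)	\
		GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
			DPNI_##field##_SHIFT)

	#define dpni_set_field(var, field, val)	\
		((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))

	/* for the 'en' byte of dpni_cmd_enable_vlan_filter, the ENABLE field
	 * is one bit wide at shift 0, i.e. only the LSB is written, matching
	 * the comment in the struct above
	 */
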
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 6ea7db66a632..aa429c17c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1225,6 +1225,99 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags: 0 - tc_id and flow_id will be ignored.
+ * Pkt with this vlan_id will be passed to the next
+ * classification stages
+ * DPNI_VLAN_SET_QUEUE_ACTION -
+ * Pkt with this vlan_id will be forwarded directly to
+ * the queue defined by the tc_id and flow_id
+ *
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated for the
+ * same tc_id. Value must be in range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpni_add_mac_addr() - Add MAC address filter
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index e7b9e195b534..4e96d9362dd2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1114,4 +1114,13 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_single_step_cfg *ptp_cfg);
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
#endif /* __FSL_DPNI_H */
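
Wiring this together: the filter stays off by default because dpaa2_eth_netdev_init() only adds NETIF_F_HW_VLAN_CTAG_FILTER to hw_features; toggling it via ethtool lands in dpaa2_eth_set_features() -> dpni_enable_vlan_filter(), while creating a VLAN upper triggers the new ndo hooks. Schematically:

	/* "ethtool -K ni0 rx-vlan-filter on"
	 *   -> .ndo_set_features = dpaa2_eth_set_features()
	 *     -> dpaa2_eth_set_rx_vlan_filtering(priv, true)
	 *       -> dpni_enable_vlan_filter(mc_io, 0, token, 1)
	 *
	 * "ip link add link ni0 name ni0.100 type vlan id 100"
	 *   -> .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid(dev, proto, 100)
	 *     -> dpni_add_vlan_id(mc_io, 0, token, 100, 0, 0, 0)
	 */
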
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index ee0116ed4738..70e6d97b380f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -14,23 +14,6 @@
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
-{
- return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
-}
-
-static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
- u32 val)
-{
- enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
-}
-
-#define enetc_mdio_rd(mdio_priv, off) \
- _enetc_mdio_rd(mdio_priv, ENETC_##off)
-#define enetc_mdio_wr(mdio_priv, off, val) \
- _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
-
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
@@ -47,15 +30,29 @@ static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
-#define MDIO_DATA(x) ((x) & 0xffff)
-#define TIMEOUT 1000
+static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
+
+static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv)
+{
+ return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY;
+}
+
static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
- u32 val;
+ bool is_busy;
- return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
- !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+ return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv,
+ is_busy, !is_busy, 10, 10 * 1000);
}
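
readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly does val = op(args) until cond holds or timeout_us elapses; the old code needed the enetc_mdio_rd_reg() wrapper macro only because op is invoked with a single argument. Roughly, the new call expands to (simplified; the sleep bounds and final re-read after timeout are omitted):

	ktime_t timeout = ktime_add_us(ktime_get(), 10 * 1000);
	bool is_busy;

	for (;;) {
		is_busy = enetc_mdio_is_busy(mdio_priv);
		if (!is_busy)
			return 0;
		if (ktime_after(ktime_get(), timeout))
			break;
		usleep_range(5, 10);
	}
	return is_busy ? -ETIMEDOUT : 0;
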
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
@@ -75,7 +72,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -83,11 +80,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -95,7 +92,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
}
/* write the value */
- enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -121,7 +118,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -129,11 +126,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -141,21 +138,21 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
/* initiate the read */
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
return value;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bb9887f98841..62f42921933d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -111,6 +111,7 @@ do { \
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
+#define IF_MODE_MII 0x00000001 /* 30-31 MII interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -442,6 +443,9 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_10G;
break;
+ case PHY_INTERFACE_MODE_MII:
+ tmp |= IF_MODE_MII;
+ break;
default:
tmp |= IF_MODE_GMII;
if (phy_if == PHY_INTERFACE_MODE_RGMII ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d391a45cebb6..541de32ea662 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -58,7 +58,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 858cb293152a..5d7824d2b4d4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1502,7 +1502,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- assert(skb->queue_mapping < ndev->ae_handle->q_num);
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
return hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9778c83150f1..4c4252e68de5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1384,10 +1384,10 @@ static int ibmvnic_close(struct net_device *netdev)
/**
* build_hdr_data - creates L2/L3/L4 header data buffer
- * @hdr_field - bitfield determining needed headers
- * @skb - socket buffer
- * @hdr_len - array of header lengths
- * @tot_len - total length of data
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths
+ * @hdr_data: buffer to write the header to
*
* Reads hdr_field to determine which headers are needed by firmware.
* Builds a buffer containing these headers. Saves individual header
@@ -1444,11 +1444,11 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
/**
* create_hdr_descs - create header and header extension descriptors
- * @hdr_field - bitfield determining needed headers
- * @data - buffer containing header data
- * @len - length of data buffer
- * @hdr_len - array of individual header lengths
- * @scrq_arr - descriptor array
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
@@ -1496,10 +1496,9 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @skb - socket buffer
- * @num_entries - number of descriptors to be sent
- * @subcrq - first TX descriptor
- * @hdr_field - bit field determining which headers will be sent
+ * @txbuff: tx buffer
+ * @num_entries: number of descriptors to be sent
+ * @hdr_field: bit field determining which headers will be sent
*
* This function will build a TX descriptor array with applicable
* L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
@@ -1925,93 +1924,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return rc;
}
-/**
- * do_change_param_reset returns zero if we are able to keep processing reset
- * events, or non-zero if we hit a fatal error and must halt.
- */
-static int do_change_param_reset(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rwi *rwi,
- u32 reset_state)
-{
- struct net_device *netdev = adapter->netdev;
- int i, rc;
-
- netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
- rwi->reset_reason);
-
- netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
-
- ibmvnic_cleanup(netdev);
-
- if (reset_state == VNIC_OPEN) {
- rc = __ibmvnic_close(netdev);
- if (rc)
- goto out;
- }
-
- release_resources(adapter);
- release_sub_crqs(adapter, 1);
- release_crq_queue(adapter);
-
- adapter->state = VNIC_PROBED;
-
- rc = init_crq_queue(adapter);
-
- if (rc) {
- netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
- goto out;
- }
-
- /* If the adapter was in PROBE state prior to the reset,
- * exit here.
- */
- if (reset_state == VNIC_PROBED)
- goto out;
-
- rc = ibmvnic_login(netdev);
- if (rc) {
- goto out;
- }
-
- rc = init_resources(adapter);
- if (rc)
- goto out;
-
- ibmvnic_disable_irqs(adapter);
-
- adapter->state = VNIC_CLOSED;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
- rc = __ibmvnic_open(netdev);
- if (rc) {
- rc = IBMVNIC_OPEN_FAILED;
- goto out;
- }
-
- /* refresh device's multicast list */
- ibmvnic_set_multi(netdev);
-
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
-out:
- if (rc)
- adapter->state = reset_state;
- return rc;
-}
-
-/**
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2028,7 +1941,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->state, adapter->failover_pending,
rwi->reset_reason, reset_state);
- rtnl_lock();
+ adapter->reset_reason = rwi->reset_reason;
+ /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_lock();
+
/*
* Now that we have the rtnl lock, clear any pending failover.
* This will ensure ibmvnic_open() has either completed or will
@@ -2038,7 +1955,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->failover_pending = false;
netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
@@ -2050,25 +1966,37 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (reset_state == VNIC_OPEN &&
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_FAILOVER) {
- adapter->state = VNIC_CLOSING;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ goto out;
+ } else {
+ adapter->state = VNIC_CLOSING;
- /* Release the RTNL lock before link state change and
- * re-acquire after the link state change to allow
- * linkwatch_event to grab the RTNL lock and run during
- * a reset.
- */
- rtnl_unlock();
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- rtnl_lock();
- if (rc)
- goto out;
+ /* Release the RTNL lock before link state change and
+ * re-acquire after the link state change to allow
+ * linkwatch_event to grab the RTNL lock and run during
+ * a reset.
+ */
+ rtnl_unlock();
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ rtnl_lock();
+ if (rc)
+ goto out;
- if (adapter->state != VNIC_CLOSING) {
- rc = -1;
- goto out;
+ if (adapter->state != VNIC_CLOSING) {
+ rc = -1;
+ goto out;
+ }
+
+ adapter->state = VNIC_CLOSED;
}
+ }
- adapter->state = VNIC_CLOSED;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ release_resources(adapter);
+ release_sub_crqs(adapter, 1);
+ release_crq_queue(adapter);
}
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -2077,7 +2005,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
release_sub_crqs(adapter, 1);
} else {
@@ -2116,7 +2046,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- if (adapter->req_rx_queues != old_num_rx_queues ||
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_resources(adapter);
+ if (rc)
+ goto out;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
@@ -2181,7 +2115,9 @@ out:
/* restore the adapter state if reset failed */
if (rc)
adapter->state = reset_state;
- rtnl_unlock();
+ /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_unlock();
netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
adapter->state, adapter->failover_pending, rc);
@@ -2312,10 +2248,7 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
- /* CHANGE_PARAM requestor holds rtnl_lock */
- rc = do_change_param_reset(adapter, rwi, reset_state);
- } else if (adapter->force_reset_recovery) {
+ if (adapter->force_reset_recovery) {
/*
* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
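
With do_change_param_reset() folded into do_reset(), the rtnl rule becomes: CHANGE_PARAM resets are requested from ethtool paths whose caller already holds rtnl_lock, while every other reset source does not, so the lock is taken and released conditionally. A condensed sketch of the pattern:

	bool need_rtnl = adapter->reset_reason != VNIC_RESET_CHANGE_PARAM;

	if (need_rtnl)
		rtnl_lock();
	/* ... reset work that must run under rtnl ... */
	if (need_rtnl)
		rtnl_unlock();
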
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 5c19ff452558..2fb52bd6fc0e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1531,8 +1531,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
.ndo_features_check = fm10k_features_check,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1db482d310c2..521ea9df38d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12804,8 +12804,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4aca637d4a23..2574e78f7597 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2344,7 +2344,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
**/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
@@ -2352,9 +2352,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct xdp_buff xdp;
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+ frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
@@ -2406,12 +2406,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- i40e_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = i40e_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index ed08ace4f05a..647e7fde11b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -911,7 +911,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
return;
}
- speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
+ speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
if (!speed)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c52b9bb0e3ab..6e251dfffc91 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6790,6 +6790,4 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a2d0aad8cfdd..422f53997c02 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1089,23 +1089,25 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*/
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp;
bool failure;
- xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
+ unsigned int offset = ice_rx_offset(rx_ring);
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
+ unsigned char *hard_start;
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
@@ -1151,10 +1153,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
goto construct_skb;
}
- xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
- xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
- xdp.data_meta = xdp.data;
- xdp.data_end = xdp.data + size;
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on len size */
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index bc2f4390b51d..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,12 +191,7 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
- /* this is tracked separately to help us debug stack drops */
- rx_ring->rx_stats.gro_dropped++;
- netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
- rx_ring->q_index);
- }
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 03f78fdb0dcd..84d4284b8b32 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5959,15 +5959,6 @@ static int igb_tso(struct igb_ring *tx_ring,
return 1;
}
-static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -5990,10 +5981,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- igb_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
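
The same per-driver helper is deleted from igbvf, igc, ixgbe and ixgbevf below; all five now use the generic skb_csum_is_sctp(), which at this point in the tree reduces to a flag test instead of re-parsing the IPv6 header chain:

	/* include/linux/skbuff.h (as of this series) */
	static inline bool skb_csum_is_sctp(struct sk_buff *skb)
	{
		return skb->csum_not_inet;
	}
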
@@ -8681,13 +8669,13 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
u16 cleaned_count = igb_desc_unused(rx_ring);
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
+ u32 frame_sz = 0;
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+ frame_sz = igb_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8715,12 +8703,12 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- igb_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = igb_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 30fdea24e94a..fb3fbcb13331 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2072,15 +2072,6 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
return 1;
}
-static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, __be16 protocol)
{
@@ -2102,10 +2093,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((protocol == htons(ETH_P_IPV6)) &&
- igbvf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index afd6a62da29d..43aec42e6d9d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -949,15 +949,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
}
}
-static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -980,10 +971,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if ((first->protocol == htons(ETH_P_IP) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- (first->protocol == htons(ETH_P_IPV6) &&
- igc_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 393d1c2cd853..e08c01525fd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2291,7 +2291,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbe_adapter *adapter = q_vector->adapter;
#ifdef IXGBE_FCOE
int ddp_bytes;
@@ -2301,12 +2301,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ -2336,12 +2335,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbe_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = ixgbe_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -8040,15 +8039,6 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
-static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
@@ -8074,10 +8064,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbe_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -10278,8 +10265,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4061cd7db5dd..a14e55e7fce8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1121,19 +1121,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbevf_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
@@ -1161,12 +1160,12 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbevf_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = ixgbevf_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
@@ -3844,15 +3843,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
@@ -3873,10 +3863,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbevf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc4d8d144401..6290bfb6494e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2263,11 +2263,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
-
- xdp->data_hard_start = data;
- xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
- xdp->data_end = xdp->data + data_len;
- xdp_set_data_meta_invalid(xdp);
+ xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+ data_len, false);
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
@@ -2363,9 +2360,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
u32 desc_status, frame_sz;
struct xdp_buff xdp_buf;
+ xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- xdp_buf.frame_sz = PAGE_SIZE;
- xdp_buf.rxq = &rxq->xdp_rxq;
sinfo.nr_frags = 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8867f25afab4..663157dc8062 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -143,7 +143,7 @@ struct mvpp2_cls_c2_entry {
/* Number of per-port dedicated entries in the C2 TCAM */
#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
-/* Each port has oen range per flow type + one entry controling the global RSS
+/* Each port has one range per flow type + one entry controlling the global RSS
* setting and the default rx queue
*/
#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 358119d98358..143522908477 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3563,17 +3563,17 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
frag_size = bm_pool->frag_size;
if (xdp_prog) {
- xdp.data_hard_start = data;
- xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
- xdp.data_end = xdp.data + rx_bytes;
- xdp.frame_sz = PAGE_SIZE;
+ struct xdp_rxq_info *xdp_rxq;
if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
- xdp.rxq = &rxq->xdp_rxq_short;
+ xdp_rxq = &rxq->xdp_rxq_short;
else
- xdp.rxq = &rxq->xdp_rxq_long;
+ xdp_rxq = &rxq->xdp_rxq_long;
- xdp_set_data_meta_invalid(&xdp);
+ xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
+ xdp_prepare_buff(&xdp, data,
+ MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+ rx_bytes, false);
ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index a30eb90ba3d2..6ee53c52627f 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -914,15 +914,15 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
@@ -931,7 +931,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -999,12 +1000,17 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
return -EINVAL;
}
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1425,8 +1431,9 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1630,8 +1637,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1761,19 +1769,20 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
MVPP2_PRS_RI_L4_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1786,14 +1795,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
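
The mvpp2_prs changes above invert the IPv4 lookup chain: the destination-address (cast) entries now run first with the DIP AI bit clear, set that bit and shift back 12 bytes to the protocol field, and the protocol entries then require the bit and terminate into flow-id generation. A toy model of that two-pass TCAM chaining, purely illustrative and not the hardware behaviour:

#include <stdbool.h>
#include <stdio.h>

#define DIP_AI_BIT 0x1

struct lookup {
	bool need_ai;	/* entry matches only with this AI state */
	bool set_ai;	/* entry sets the AI bit for the next pass */
	const char *name;
};

static const struct lookup chain[] = {
	{ .need_ai = false, .set_ai = true,  .name = "ipv4 dst addr" },
	{ .need_ai = true,  .set_ai = false, .name = "ipv4 proto"    },
};

int main(void)
{
	unsigned int ai = 0;
	int i;

	for (i = 0; i < 2; i++) {
		if (chain[i].need_ai != !!(ai & DIP_AI_BIT))
			continue;
		printf("hit: %s\n", chain[i].name);
		if (chain[i].set_ai)
			ai |= DIP_AI_BIT;	/* chain to the second pass */
	}
	return 0;
}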
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 1a8f5a039d50..84a91234ba8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -867,7 +867,7 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->lmac_count = MAX_LMAC_PER_CGX;
for (i = 0; i < cgx->lmac_count; i++) {
- lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
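
The cgx.c change swaps kcalloc(1, ...) for kzalloc(): both return zeroed memory, but kzalloc() states the single-object intent directly and avoids kcalloc()'s pointless n * size overflow check for n == 1. A userspace analogue of the same idea, assuming nothing beyond the C library:

#include <stdlib.h>
#include <string.h>

struct lmac_demo {	/* stand-in for struct lmac */
	char name[16];
	int id;
};

int main(void)
{
	struct lmac_demo *a = calloc(1, sizeof(*a));	/* kcalloc(1, ...) */
	struct lmac_demo *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	memset(b, 0, sizeof(*b));	/* what kzalloc() amounts to */
	free(a);
	free(b);
	return 0;
}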
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index a1f79445db71..3c640f6aba92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -162,6 +162,11 @@ enum key_fields {
NPC_DIP_IPV4,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e8fd712860a1..0b6bf9f0c6f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1323,7 +1323,7 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
break;
default:
return rvu_get_blkaddr(rvu, blktype, 0);
- };
+ }
if (is_block_implemented(rvu->hw, blkaddr))
return blkaddr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d27543c1a166..f60499562d2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1757,6 +1757,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
default:
+ seq_puts(s, "\n");
break;
}
}
@@ -1785,7 +1786,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
} else {
switch (rule->rx_action.op) {
case NIX_RX_ACTIONOP_DROP:
@@ -1806,7 +1807,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
}
}
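
The "};" to "}" fixes in rvu.c and rvu_debugfs.c above remove a null statement: a brace-delimited switch is already a complete statement, so the trailing semicolon parses as an extra empty statement, which checkers such as clang's -Wextra-semi-stmt flag. Minimal illustration:

#include <stdio.h>

int main(void)
{
	int x = 1;

	switch (x) {
	case 1:
		puts("one");
		break;
	default:
		break;
	}	/* no ';' belongs here */
	return 0;
}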
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 14832b66d1fe..4ba9d54ce4e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -26,6 +26,11 @@ static const char * const npc_flow_names[] = {
[NPC_DIP_IPV4] = "ipv4 destination ip",
[NPC_SIP_IPV6] = "ipv6 source ip",
[NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
[NPC_SPORT_TCP] = "tcp source port",
[NPC_DPORT_TCP] = "tcp destination port",
[NPC_SPORT_UDP] = "udp source port",
@@ -212,13 +217,13 @@ static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
return false;
}
-static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
- u8 intf)
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
{
if (!npc_is_field_present(rvu, type, intf) ||
npc_check_overlap(rvu, blkaddr, type, 0, intf))
- return -EOPNOTSUPP;
- return 0;
+ return false;
+ return true;
}
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
@@ -269,7 +274,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
break;
default:
return;
- };
+ }
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -448,14 +453,13 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
u64 tcp_udp_sctp;
- int err, hdr;
+ int hdr;
if (is_npc_intf_tx(intf))
features = &mcam->tx_features;
for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
- err = npc_check_field(rvu, blkaddr, hdr, intf);
- if (!err)
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
*features |= BIT_ULL(hdr);
}
@@ -464,13 +468,26 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp)
- if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
*features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
/* for vlan corresponding layer type should be in the key */
if (*features & BIT_ULL(NPC_OUTER_VID))
- if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
}
@@ -743,13 +760,13 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
return;
/* For tcp/udp/sctp LTYPE should be present in entry */
- if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf);
@@ -758,6 +775,15 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
do { \
if (features & BIT_ULL((field))) { \
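
npc_set_features() above tracks which match keys an NPC profile supports in a u64 bitmap indexed by enum key_fields, so the new NPC_IPPROTO_* entries each cost one BIT_ULL(). A self-contained sketch of the bitmap idiom, with made-up enum values:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum key_fields {	/* illustrative subset, values are arbitrary */
	NPC_IPPROTO_TCP = 10,
	NPC_IPPROTO_UDP,
	NPC_IPPROTO_ESP,
};

int main(void)
{
	uint64_t features = 0;

	features |= BIT_ULL(NPC_IPPROTO_TCP) | BIT_ULL(NPC_IPPROTO_ESP);
	if (features & BIT_ULL(NPC_IPPROTO_ESP))
		printf("ESP matching supported\n");
	return 0;
}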
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 73fb94dd5fbc..bdfa2e293531 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -270,14 +270,17 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -297,10 +300,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
}
}
- aq->rss.rq = rss->ind_tbl[idx];
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
/* Fill AQ info */
- aq->qidx = idx;
+ aq->qidx = index + idx;
aq->ctype = NIX_AQ_CTYPE_RSS;
aq->op = NIX_AQ_INSTOP_INIT;
}
@@ -335,9 +338,10 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(rss->ind_tbl);
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Init RSS key if it is not setup already */
if (!rss->enable)
@@ -345,13 +349,19 @@ int otx2_rss_init(struct otx2_nic *pfvf)
otx2_set_rss_key(pfvf);
if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Default indirection table */
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] =
+ rss_ctx->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
}
- ret = otx2_set_rss_table(pfvf);
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
if (ret)
return ret;
@@ -986,7 +996,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->sq_cnt = pfvf->hw.tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
- nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->rss_grps = MAX_RSS_GROUPS;
nixlf->xqe_sz = NIX_XQESZ_W16;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 103430400a8a..143ae04c8ad5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -51,13 +51,17 @@ enum arua_mapped_qtypes {
#define NIX_LF_POISON_VEC 0x82
/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
struct otx2_rss_info {
u8 enable;
u32 flowkey_cfg;
u16 rss_size;
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
@@ -643,7 +647,7 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -684,10 +688,11 @@ int otx2_get_flow(struct otx2_nic *pfvf,
int otx2_get_all_flows(struct otx2_nic *pfvf,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
- struct ethtool_rx_flow_spec *fsp);
+ struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
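
The otx2 RSS rework carves the hardware indirection table into MAX_RSS_GROUPS slices of rss_size entries each; group ctx_id starts at hw index rss_size * ctx_id, as the `index` computation in otx2_set_rss_table() shows. The default group is seeded with ethtool_rxfh_indir_default(), which in mainline reduces to index % n_rx_rings. A small demonstration (RSS_SIZE shrunk for readability):

#include <stdint.h>
#include <stdio.h>

#define RSS_SIZE 8	/* illustrative; the real table is larger */

/* mirrors include/linux/ethtool.h's ethtool_rxfh_indir_default() */
static uint32_t indir_default(uint32_t index, uint32_t n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	uint32_t ctx_id = 1, i;

	printf("ctx %u occupies hw indices %u..%u\n",
	       ctx_id, RSS_SIZE * ctx_id, RSS_SIZE * (ctx_id + 1) - 1);
	for (i = 0; i < RSS_SIZE; i++)
		printf("ind_tbl[%u] -> queue %u\n", i, indir_default(i, 3));
	return 0;
}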
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 67171b66a56c..aaba0454d188 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -581,7 +581,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
- ret = otx2_add_flow(pfvf, &nfc->fs);
+ ret = otx2_add_flow(pfvf, nfc);
break;
case ETHTOOL_SRXCLSRLDEL:
if (netif_running(dev) && ntuple)
@@ -641,42 +641,50 @@ static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
-
- return pfvf->hw.rss_info.rss_size;
+ return MAX_RSS_INDIR_TBL_SIZE;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss;
- int idx;
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- rss = &pfvf->hw.rss_info;
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
- }
+ return 0;
+}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
return 0;
}
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int idx;
+ int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -688,20 +696,85 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = indir[idx];
}
+ otx2_set_rss_table(pfvf, *rss_context);
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
- otx2_set_rss_key(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
+ }
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
}
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
- otx2_set_rss_table(pfvf);
return 0;
}
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -771,6 +844,8 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -866,6 +941,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
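
In the set_rxfh_context path above, userspace requests a new context by passing the ETH_RXFH_CONTEXT_ALLOC sentinel (0xffffffff in the uapi), and the driver writes the chosen group id back through *rss_context. A hypothetical userspace mirror of that claim-first-free-slot contract:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CTX_ALLOC 0xffffffffu	/* stands in for ETH_RXFH_CONTEXT_ALLOC */
#define DEMO_MAX_GROUPS 8

static int groups[DEMO_MAX_GROUPS];	/* nonzero = slot in use */

static int demo_set_ctx(uint32_t *id)
{
	uint32_t c;

	if (*id != DEMO_CTX_ALLOC)
		return (*id < DEMO_MAX_GROUPS && groups[*id]) ? 0 : -1;

	for (c = 0; c < DEMO_MAX_GROUPS; c++) {
		if (!groups[c]) {
			groups[c] = 1;
			*id = c;	/* report the id, like *rss_context */
			return 0;
		}
	}
	return -2;	/* table full; the driver returns -EINVAL */
}

int main(void)
{
	uint32_t id = DEMO_CTX_ALLOC;

	if (!demo_set_ctx(&id))
		printf("allocated rss context %u\n", id);
	return 0;
}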
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index be8ccfce1848..d6b5bf247e31 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -16,6 +16,7 @@ struct otx2_flow {
u32 location;
u16 entry;
bool is_vf;
+ u8 rss_ctx_id;
int vf;
};
@@ -245,6 +246,7 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
if (iter->location == location) {
nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
return 0;
}
}
@@ -270,14 +272,16 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
return err;
}
-static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -297,10 +301,16 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (ipv4_l4_mask->ip4src) {
memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
sizeof(pkt->ip4src));
@@ -339,20 +349,60 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tos & ah_esp_hdr->tos))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
break;
default:
break;
}
+
+ return 0;
}
-static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -372,10 +422,16 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
sizeof(pkt->ip6src));
@@ -414,10 +470,47 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
default:
break;
}
+
+ return 0;
}
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
@@ -428,8 +521,9 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
u32 flow_type;
+ int ret;
- flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
switch (flow_type) {
/* bits not set in mask are don't care */
case ETHER_FLOW:
@@ -455,13 +549,21 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
- otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
case IPV6_USER_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
- otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
default:
return -EOPNOTSUPP;
@@ -532,9 +634,13 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* change to unicast only if action of default entry is not
* requested by user
*/
- if (req->op != NIX_RX_ACTION_DEFAULT)
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ } else {
req->op = NIX_RX_ACTIONOP_UCAST;
- req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
if (vf > pci_num_vf(pfvf->pdev)) {
mutex_unlock(&pfvf->mbox.lock);
@@ -555,14 +661,16 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
-int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
- u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
bool new = false;
+ u32 ring;
int err;
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
@@ -585,6 +693,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
/* struct copy */
flow->flow_spec = *fsp;
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
err = otx2_add_flow_msg(pfvf, flow);
if (err) {
if (new)
@@ -647,6 +758,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
return 0;
}
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+ "Can't delete the rule %d associated with this rss group err:%d",
+ flow->location, err);
+ }
+}
+
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 7d83e1f91ef1..8c2b03151736 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -580,16 +580,12 @@ int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
}
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
unsigned long flags)
{
struct prestera_bridge_port *br_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
@@ -608,35 +604,26 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
struct prestera_switch *sw = port->sw;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
- ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
}
static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
bool vlan_enabled)
{
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge = prestera_bridge_by_dev(sw->swdev, dev);
if (WARN_ON(!bridge))
return -EINVAL;
@@ -665,19 +652,15 @@ static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
return 0;
}
-static int presterar_port_attr_stp_state_set(struct prestera_port *port,
- struct switchdev_trans *trans,
- struct net_device *dev,
- u8 state)
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct net_device *dev,
+ u8 state)
{
struct prestera_bridge_port *br_port;
struct prestera_bridge_vlan *br_vlan;
int err;
u16 vid;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
@@ -712,17 +695,15 @@ err_port_stp_set:
}
static int prestera_port_obj_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct prestera_port *port = netdev_priv(dev);
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = presterar_port_attr_stp_state_set(port, trans,
- attr->orig_dev,
- attr->u.stp_state);
+ err = prestera_port_attr_stp_state_set(port, attr->orig_dev,
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags &
@@ -730,17 +711,15 @@ static int prestera_port_obj_attr_set(struct net_device *dev,
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = prestera_port_attr_br_flags_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_flags_set(port, attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = prestera_port_attr_br_ageing_set(port, trans,
+ err = prestera_port_attr_br_ageing_set(port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = prestera_port_attr_br_vlan_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
default:
@@ -1020,7 +999,6 @@ prestera_bridge_port_vlan_del(struct prestera_port *port,
static int prestera_port_vlans_add(struct prestera_port *port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1029,14 +1007,10 @@ static int prestera_port_vlans_add(struct prestera_port *port,
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- u16 vid;
if (netif_is_bridge_master(dev))
return 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1045,22 +1019,13 @@ static int prestera_port_vlans_add(struct prestera_port *port,
if (!bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = prestera_bridge_port_vlan_add(port, br_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return prestera_bridge_port_vlan_add(port, br_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static int prestera_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
@@ -1069,7 +1034,7 @@ static int prestera_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- return prestera_port_vlans_add(port, vlan, trans, extack);
+ return prestera_port_vlans_add(port, vlan, extack);
default:
return -EOPNOTSUPP;
}
@@ -1081,7 +1046,6 @@ static int prestera_port_vlans_del(struct prestera_port *port,
struct net_device *dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
- u16 vid;
if (netif_is_bridge_master(dev))
return -EOPNOTSUPP;
@@ -1093,8 +1057,7 @@ static int prestera_port_vlans_del(struct prestera_port *port,
if (!br_port->bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- prestera_bridge_port_vlan_del(port, br_port, vid);
+ prestera_bridge_port_vlan_del(port, br_port, vlan->vid);
return 0;
}
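
With the switchdev transaction object gone, checks that used to run only in the "prepare" phase now run unconditionally on every call, as the ageing-time handler above shows. A sketch of the resulting validate-then-apply shape, with DEMO_* limits as hypothetical stand-ins for the PRESTERA_*_AGEING_TIME_MS constants:

#include <errno.h>
#include <stdio.h>

#define DEMO_MIN_AGEING_MS 32000u
#define DEMO_MAX_AGEING_MS 1000000000u

static int demo_set_ageing(unsigned int ms)
{
	if (ms < DEMO_MIN_AGEING_MS || ms > DEMO_MAX_AGEING_MS)
		return -ERANGE;	/* was the prepare-phase result */
	printf("ageing set to %u ms\n", ms);
	return 0;	/* the hw programming call would follow here */
}

int main(void)
{
	return demo_set_ageing(300000) ? 1 : 0;
}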
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 32aad4d32b88..51b9700fce83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2839,8 +2839,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
@@ -2873,8 +2871,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c1c9118a66c9..e35e4d7ef4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -682,8 +682,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
- xdp.rxq = &ring->xdp_rxq;
- xdp.frame_sz = priv->frag_info[0].frag_stride;
+ xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -777,10 +776,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = va - frags[0].page_offset;
- xdp.data = va;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + length;
+ xdp_prepare_buff(&xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 77961643d5a9..134bd038ae8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -83,5 +83,6 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
+ steering/dr_ste_v0.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 072363e73f1c..e20c1da95a33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -705,9 +705,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
- mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
- entry->tuple.zone & MLX5_CT_ZONE_MASK,
- MLX5_CT_ZONE_MASK);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
@@ -1241,9 +1239,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
pre_ct->flow_rule = rule;
/* add miss rule */
- memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 1f9526244222..3479672e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -81,8 +81,8 @@ static int parse_tunnel(struct mlx5e_priv *priv,
if (!enc_keyid.mask->keyid)
return 0;
- if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP))
+ if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
+ !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
flow_rule_match_mpls(rule, &match);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1fae7fab8297..6488098d2700 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -144,9 +144,9 @@ static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *sta
{
#ifdef CONFIG_MLX5_EN_IPSEC
return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#endif
-
+#else
return false;
+#endif
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
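
The en_accel.h hunk fixes a preprocessor shape, not behaviour: with CONFIG_MLX5_EN_IPSEC set, the old "return under #ifdef, then an unconditional return false" left the second return as dead code; the #ifdef/#else form compiles exactly one return path per configuration. Reduced illustration, with CONFIG_DEMO_FEATURE as a stand-in symbol:

#include <stdbool.h>
#include <stdio.h>

static bool demo_is_flow(void)
{
#ifdef CONFIG_DEMO_FEATURE
	return true;
#else
	return false;	/* only one path exists in any build */
#endif
}

int main(void)
{
	printf("flow: %d\n", demo_is_flow());
	return 0;
}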
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index a9b45606dbdb..a97e8d205094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -497,20 +497,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
}
}
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features)
-{
- struct sec_path *sp = skb_sec_path(skb);
- struct xfrm_state *x;
-
- if (sp && sp->len) {
- x = sp->xvec[0];
- if (x && x->xso.offload_handle)
- return true;
- }
- return false;
-}
-
void mlx5e_ipsec_build_inverse_table(void)
{
u16 mss_inv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9df9b9a8e09b..3e80742a3caf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -57,8 +57,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_inverse_table_init(void);
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
@@ -87,8 +85,28 @@ static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ips
return ipsec_st->x;
}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (sp && sp->len) {
+ struct xfrm_state *x = sp->xvec[0];
+
+ if (x && x->xso.offload_handle)
+ return true;
+ }
+ return false;
+}
+
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -96,7 +114,14 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
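
Moving mlx5e_ipsec_feature_check() into the header with a static inline stub for the !CONFIG_MLX5_EN_IPSEC case is what lets en_main.c below drop its #ifdef guards: the stub returns a compile-time false and the caller's branch folds away. The pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* CONFIG_DEMO_IPSEC / demo_ipsec_feature_check() are stand-ins for
 * CONFIG_MLX5_EN_IPSEC / mlx5e_ipsec_feature_check(). */
#ifdef CONFIG_DEMO_IPSEC
bool demo_ipsec_feature_check(const void *skb);
#else
static inline bool demo_ipsec_feature_check(const void *skb)
{
	(void)skb;
	return false;	/* constant, so callers need no #ifdef */
}
#endif

int main(void)
{
	puts(demo_ipsec_feature_check(NULL) ? "ipsec path" : "plain path");
	return 0;
}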
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a852b4901aa..f33c38629886 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2068,10 +2068,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
u32 buf_size = 0;
int i;
-#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -4376,10 +4374,8 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
-#ifdef CONFIG_MLX5_EN_IPSEC
if (mlx5e_ipsec_feature_check(skb, netdev, features))
return features;
-#endif
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -4622,8 +4618,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 989c70c1eda3..cfa0e8552975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -653,8 +653,6 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7f5851c61218..dec93d57542f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1126,12 +1126,8 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
- xdp->data_hard_start = va;
- xdp->data = va + headroom;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &rq->xdp_rxq;
- xdp->frame_sz = rq->buff.frame0_sz;
+ xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, va, headroom, len, false);
}
static struct sk_buff *
@@ -1786,12 +1782,10 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev)) {
netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
return -EINVAL;
}
-#endif
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4cdf834fa74a..56aa39ac1a1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1317,12 +1317,6 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
- if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
- NL_SET_ERR_MSG_MOD(extack,
- "E-switch priorities unsupported, upgrade FW");
- return -EOPNOTSUPP;
- }
-
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 61ed671fe741..74f233eece54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -241,9 +241,8 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
-
} else
sq->stats->csum_none++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da901e364656..876e6449edb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1042,8 +1042,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
void *vport_elem;
int err = 0;
- if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
- !MLX5_CAP_QOS(dev, esw_scheduling))
+ if (!esw->qos.enabled)
return 0;
if (vport->qos.enabled)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 947f346bdc2d..381325b4a863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -141,9 +141,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
@@ -541,13 +538,13 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_handle *miss_rule;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_table *next_ft;
struct mlx5_flow_table *ft;
- struct prio *prio_s = NULL;
struct fs_chain *chain_s;
struct list_head *pos;
+ struct prio *prio_s;
u32 *flow_group_in;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index df1363a34a42..27c2b8416d02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
};
-struct dr_action_modify_field_conv {
- u16 hw_field;
- u8 start;
- u8 end;
- u8 l3_type;
- u8 l4_type;
-};
-
-static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
- },
-};
-
-#define MAX_VLANS 2
-struct dr_action_vlan_info {
- int count;
- u32 headers[MAX_VLANS];
-};
-
-struct dr_action_apply_attr {
- u32 modify_index;
- u16 modify_actions;
- u32 decap_index;
- u16 decap_actions;
- u8 decap_with_vlan:1;
- u64 final_icm_addr;
- u32 flow_tag;
- u32 ctr_id;
- u16 gvmi;
- u16 hit_gvmi;
- u32 reformat_id;
- u32 reformat_size;
- struct dr_action_vlan_info vlans;
-};
-
static int
dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
enum mlx5dr_action_type *action_type)
@@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
return 0;
}
-static void dr_actions_init_next_ste(u8 **last_ste,
- u32 *added_stes,
- enum mlx5dr_ste_entry_type entry_type,
- u16 gvmi)
-{
- (*added_stes)++;
- *last_ste += DR_STE_SIZE;
- mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
-}
-
-static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
-
- /* We want to make sure the modify header comes before L2
- * encapsulation, since modify-header actions are supported
- * on the outer headers only.
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_push_vlan(last_ste,
- attr->vlans.headers[i],
- encap);
- }
- }
-
- if (encap) {
- /* Modify header and encapsulation require different STEs,
- * since the modify header STE format doesn't support the
- * encapsulation tunneling_action.
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
- action_type_set[DR_ACTION_TYP_PUSH_VLAN])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_encap(last_ste,
- attr->reformat_id,
- attr->reformat_size,
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
- /* Whenever prio_tag_required is enabled, we can be sure that
- * the previous table (ACL) already pushed a vlan onto our
- * packet, and due to a HW limitation we need to set this bit,
- * otherwise push vlan + reformat will not work.
- */
- if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
- mlx5dr_ste_set_go_back_bit(last_ste);
- }
-
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-}
-
-static void dr_actions_apply_rx(u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-
- if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->decap_actions,
- attr->decap_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
- mlx5dr_ste_set_rx_decap(last_ste);
-
- if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i ||
- action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
- action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_set_rx_pop_vlan(last_ste);
- }
- }
-
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_MODIFY_PKT,
- attr->gvmi);
- else
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
-
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TAG]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
- }
-}
-
/* Apply the actions on the rule STE array starting from the last_ste.
* Actions might require more than one STE; new_num_stes returns
* the new size of the STE array for the rule with its actions.
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
enum mlx5dr_ste_entry_type ste_type,
u8 *action_type_set,
u8 *last_ste,
- struct dr_action_apply_attr *attr,
+ struct mlx5dr_ste_actions_attr *attr,
u32 *new_num_stes)
{
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
u32 added_stes = 0;
if (ste_type == MLX5DR_STE_TYPE_RX)
- dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
else
- dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
- last_ste += added_stes * DR_STE_SIZE;
*new_num_stes += added_stes;
-
- mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
- mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
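
For reference, a condensed sketch of how a caller consumes new_num_stes
after this refactor -- the names follow this patch, but the surrounding
logic is simplified:

    /* Actions may append STEs in place after the last match STE,
     * so the rule builder tracks the returned count instead of
     * assuming a fixed-size chain.
     */
    u32 num_stes = nic_matcher->num_of_builders;    /* match STEs */
    u8 *last_ste = ste_arr + (num_stes - 1) * DR_STE_SIZE;

    dr_actions_apply(dmn, nic_dmn->ste_type, action_type_set,
                     last_ste, &attr, &num_stes);
    /* num_stes now also covers the action STEs added in place */
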
static enum dr_action_domain
@@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+ struct mlx5dr_ste_actions_attr attr = {};
struct mlx5dr_action *dest_action = NULL;
u32 state = DR_ACTION_STATE_NO_ACTION;
- struct dr_action_apply_attr attr = {};
enum dr_action_domain action_domain;
bool recalc_cs_required = false;
u8 *last_ste;
@@ -756,12 +468,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
break;
case DR_ACTION_TYP_POP_VLAN:
- max_actions_type = MAX_VLANS;
+ max_actions_type = MLX5DR_MAX_VLANS;
attr.vlans.count++;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- max_actions_type = MAX_VLANS;
- if (attr.vlans.count == MAX_VLANS)
+ max_actions_type = MLX5DR_MAX_VLANS;
+ if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
@@ -817,132 +529,6 @@ out_invalid_arg:
return -EINVAL;
}
-#define CVLAN_ETHERTYPE 0x8100
-#define SVLAN_ETHERTYPE 0x88a8
-#define HDR_LEN_L2_ONLY 14
-#define HDR_LEN_L2_VLAN 18
-#define REWRITE_HW_ACTION_NUM 6
-
-static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
- struct mlx5dr_action *action,
- void *data, size_t data_sz)
-{
- struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
- u64 ops[REWRITE_HW_ACTION_NUM] = {};
- u32 hdr_fld_4b;
- u16 hdr_fld_2b;
- u16 vlan_type;
- bool vlan;
- int i = 0;
- int ret;
-
- vlan = (data_sz != HDR_LEN_L2_ONLY);
-
- /* dmac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* smac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
- MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* dmac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- /* ethertype + (optional) vlan */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 32);
- if (!vlan) {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
- } else {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
- hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
- }
- i++;
-
- /* smac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- if (vlan) {
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- i++;
- }
-
- action->rewrite.data = (void *)ops;
- action->rewrite.num_of_actions = i;
-
- ret = mlx5dr_send_postsend_action(dmn, action);
- if (ret) {
- mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
- return ret;
- }
-
- return 0;
-}
-
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
@@ -1217,21 +803,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
- /* Only Ethernet frames are supported, with VLAN (18 bytes) or without (14) */
- if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
- return -EINVAL;
+ u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
+ int ret;
+
+ ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
+ data, data_sz,
+ hw_actions,
+ ACTION_CACHE_LINE_SIZE,
+ &action->rewrite.num_of_actions);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+ return ret;
+ }
action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
DR_CHUNK_SIZE_8);
- if (!action->rewrite.chunk)
+ if (!action->rewrite.chunk) {
+ mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
+ }
+ action->rewrite.data = (void *)hw_actions;
action->rewrite.index = (action->rewrite.chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
- ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
+ ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
+ mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
mlx5dr_icm_free_chunk(action->rewrite.chunk);
return ret;
}
@@ -1243,6 +842,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
}
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+
struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
{
return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
@@ -1315,31 +917,13 @@ dec_ref:
return NULL;
}
-static const struct dr_action_modify_field_conv *
-dr_action_modify_get_hw_info(u16 sw_field)
-{
- const struct dr_action_modify_field_conv *hw_action_info;
-
- if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
- goto not_found;
-
- hw_action_info = &dr_action_conv_arr[sw_field];
- if (!hw_action_info->end && !hw_action_info->start)
- goto not_found;
-
- return hw_action_info;
-
-not_found:
- return NULL;
-}
-
static int
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 max_length;
u16 sw_field;
u32 data;
@@ -1349,7 +933,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
@@ -1357,20 +941,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
max_length = hw_action_info->end - hw_action_info->start + 1;
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start);
-
- /* The PRM defines that a length of zero specifies a length of 32 bits */
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- max_length == 32 ? 0 : max_length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_add(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start,
+ max_length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1381,9 +957,9 @@ static int
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 offset, length, max_length;
u16 sw_field;
u32 data;
@@ -1395,7 +971,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
return -EINVAL;
@@ -1411,19 +987,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start + offset);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_set(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start + offset,
+ length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1434,12 +1003,12 @@ static int
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
u16 src_field, dst_field;
/* Get SW modify action data */
@@ -1450,8 +1019,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
length = MLX5_GET(copy_action_in, sw_action, length);
/* Convert SW data to HW modify action format */
- hw_src_action_info = dr_action_modify_get_hw_info(src_field);
- hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
+ hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
if (!hw_src_action_info || !hw_dst_action_info) {
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
return -EINVAL;
@@ -1471,23 +1040,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_copy, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
- hw_dst_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
- hw_dst_action_info->start + dst_offset);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
- hw_src_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
- hw_src_action_info->start + dst_offset);
+ mlx5dr_ste_set_action_copy(dmn->ste_ctx,
+ hw_action,
+ hw_dst_action_info->hw_field,
+ hw_dst_action_info->start + dst_offset,
+ length,
+ hw_src_action_info->hw_field,
+ hw_src_action_info->start + src_offset);
*ret_dst_hw_info = hw_dst_action_info;
*ret_src_hw_info = hw_src_action_info;
@@ -1499,8 +1058,8 @@ static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 action;
int ret;
@@ -1677,15 +1236,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
- u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
- u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
- u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
+ u16 hw_field = 0;
+ u32 l3_type = 0;
+ u32 l4_type = 0;
*modify_ttl = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index aa2c2d6c44e6..47ec88964bf3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -57,6 +57,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
+ dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+ if (!dmn->ste_ctx) {
+ mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 6527eb4df153..e3a002983c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -221,6 +221,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
struct mlx5dr_ste_build *sb;
bool inner, rx;
@@ -259,80 +260,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
- mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
- mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
- mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
- mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
- dmn, inner, rx);
+ mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
+ &mask, dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
- mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
else if (dr_mask_is_tnl_geneve(&mask, dmn))
- mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_icmp(&mask, dmn)) {
- ret = mlx5dr_ste_build_icmp(&sb[idx++],
+ ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (ret)
return ret;
}
if (dr_mask_is_tnl_gre_set(&mask.misc))
- mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Inner */
@@ -343,50 +353,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = true;
if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
- mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6d73719db1f4..ddcb7017e121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
@@ -25,7 +26,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
@@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
@@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -169,6 +172,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste *col_ste,
u8 *hw_ste)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste *new_ste;
int ret;
@@ -180,11 +184,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
- ret = dr_rule_append_to_miss_list(new_ste,
+ ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+ mlx5dr_dbg(dmn, "Failed update dup entry\n");
goto err_exit;
}
@@ -224,6 +228,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *new_htbl,
struct list_head *update_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
@@ -237,7 +242,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
/* Copy STE control and tag */
memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
@@ -253,7 +259,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_ste,
hw_ste);
if (!new_ste) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+ mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
new_idx);
return NULL;
}
@@ -391,7 +397,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
new_htbl,
formatted_ste,
@@ -436,13 +443,15 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* It is safe to operate dr_ste_set_hit_addr on the full hw_ste here
* (48B long), since it works only on the first 32B
*/
- mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+ mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
+ prev_htbl->ste_arr[0].hw_ste,
new_htbl->chunk->icm_addr,
new_htbl->chunk->num_of_entries);
ste_to_update = &prev_htbl->ste_arr[0];
} else {
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ cur_htbl->pointing_ste->hw_ste,
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
@@ -496,6 +505,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
@@ -507,8 +518,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
- if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+ if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
+ miss_list, send_list)) {
+ mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -659,6 +671,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
u8 num_of_builders = nic_matcher->num_of_builders;
struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
int i, k, ret;
@@ -692,10 +705,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
goto err_exit;
/* Point current ste to the new action */
- mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ prev_hw_ste,
+ action_ste->htbl);
ret = dr_rule_add_member(nic_rule, action_ste);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+ mlx5dr_dbg(dmn, "Failed adding rule member\n");
goto free_ste_info;
}
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
@@ -722,6 +737,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
/* Take ref on table, only on first time this ste is used */
@@ -730,7 +746,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
ste->ste_chain_location = ste_location;
@@ -743,7 +760,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
ste,
hw_ste,
DR_CHUNK_SIZE_1)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ mlx5dr_dbg(dmn, "Failed allocating table\n");
goto clean_ste_info;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index d275823bff2f..1614481fdf8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -3,104 +3,7 @@
#include <linux/types.h>
#include <linux/crc32.h>
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-#define STE_IPV4 0x1
-#define STE_IPV6 0x2
-#define STE_TCP 0x1
-#define STE_UDP 0x2
-#define STE_SPI 0x3
-#define IP_VERSION_IPV4 0x4
-#define IP_VERSION_IPV6 0x6
-#define STE_SVLAN 0x1
-#define STE_CVLAN 0x2
-
-#define DR_STE_ENABLE_FLOW_TAG BIT(31)
-
-/* Set a specific value into the STE using DR_STE_SET_VAL */
-#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
- if ((spec)->s_fname) { \
- MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
- (spec)->s_fname = 0; \
- } \
-} while (0)
-
-/* Copy spec->s_fname into the STE's tag->t_fname */
-#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
-
-/* Set bit_mask->bm_fname to -1 and mark spec->s_fname as used */
-#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
-
-/* Copy spec->s_fname into bit_mask->bm_fname and mark spec->s_fname as used */
-#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
-
-#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
- MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
-} while (0)
-
-#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
- (_misc)->outer_first_mpls_over_gre_label || \
- (_misc)->outer_first_mpls_over_gre_exp || \
- (_misc)->outer_first_mpls_over_gre_s_bos || \
- (_misc)->outer_first_mpls_over_gre_ttl)
-#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
- (_misc)->outer_first_mpls_over_udp_label || \
- (_misc)->outer_first_mpls_over_udp_exp || \
- (_misc)->outer_first_mpls_over_udp_s_bos || \
- (_misc)->outer_first_mpls_over_udp_ttl)
-
-#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
- ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
- (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
- MLX5DR_STE_LU_TYPE_##lookup_type##_O)
-
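
A worked expansion of DR_STE_CALC_LU_TYPE above, purely for
illustration:

    /*   DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner)
     *
     * token-pastes to one of:
     *   inner set -> MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I
     *   rx set    -> MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D
     *   otherwise -> MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O
     */
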
-enum dr_ste_tunl_action {
- DR_STE_TUNL_ACTION_NONE = 0,
- DR_STE_TUNL_ACTION_ENABLE = 1,
- DR_STE_TUNL_ACTION_DECAP = 2,
- DR_STE_TUNL_ACTION_L3_DECAP = 3,
- DR_STE_TUNL_ACTION_POP_VLAN = 4,
-};
-
-enum dr_ste_action_type {
- DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
- DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
- DR_STE_ACTION_TYPE_ENCAP = 4,
-};
+#include "dr_ste.h"
struct dr_hw_ste_format {
u8 ctrl[DR_STE_SIZE_CTRL];
@@ -142,7 +45,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
return index;
}
-static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
u16 byte_mask = 0;
int i;
@@ -155,7 +58,7 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
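
The loop body of the conversion is elided by the hunk above; assuming
it condenses the 16-byte STE bit mask into a 16-bit byte mask, one bit
per fully-masked byte, the whole helper is roughly:

    u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
    {
        u16 byte_mask = 0;
        int i;

        /* One output bit per mask byte, MSB first: set it only
         * when the entire byte takes part in the match (0xff).
         */
        for (i = 0; i < DR_STE_SIZE_MASK; i++) {
            byte_mask <<= 1;
            if (bit_mask[i] == 0xff)
                byte_mask |= 1;
        }

        return byte_mask;
    }
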
-static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
+static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -169,104 +72,6 @@ void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
- DR_STE_ENABLE_FLOW_TAG | flow_tag);
-}
-
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
-{
- /* This can be used for both rx_steering_mult and sx_transmit */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
-}
-
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
-}
-
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
- bool go_back)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- DR_STE_ACTION_TYPE_PUSH_VLAN);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
- /* Due to a HW limitation we need to set this bit, otherwise reformat +
- * push vlan will not work.
- */
- if (go_back)
- mlx5dr_ste_set_go_back_bit(hw_ste_p);
-}
-
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
- /* The hardware expects the size here in words (2 bytes each) */
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
-}
-
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_DECAP);
-}
-
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_POP_VLAN);
-}
-
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_L3_DECAP);
- MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
-}
-
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
-}
-
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
-{
- return MLX5_GET(ste_general, hw_ste_p, entry_type);
-}
-
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index)
-{
- MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
- num_of_actions);
- MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
- re_write_index);
-}
-
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
-}
-
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
- u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
- MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
- MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
-
- /* Set the GVMI once; it is the same for RX and TX.
- * Bits 63_48 of the next table base / miss address encode the next GVMI
- */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
-}
-
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
@@ -279,21 +84,26 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u64 miss_addr)
{
- u64 index =
- (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
- MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
-
- return index << 6;
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste, u64 miss_addr)
{
- u64 index = (icm_addr >> 5) | ht_size;
+ u8 *hw_ste_p = ste->hw_ste;
- MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
- MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+ ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
+
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
@@ -317,15 +127,16 @@ struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
return &ste->htbl->miss_list[index];
}
-static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
u8 *hw_ste = ste->hw_ste;
- MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
- MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
+ ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
@@ -363,7 +174,8 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
/* Free ste which is the head and the only one in miss_list */
static void
-dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
+dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
@@ -380,7 +192,7 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
*/
memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
+ dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -436,7 +248,8 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Free ste that is located in the middle of the miss list:
* |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
*/
-static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_send_info *ste_info,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
@@ -448,8 +261,8 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
if (WARN_ON(!prev_ste))
return;
- miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
- mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
+ ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
prev_ste->hw_ste, ste_info,
@@ -467,6 +280,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
{
struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info ste_info_head;
struct mlx5dr_ste *next_ste, *first_ste;
bool put_on_origin_table = true;
@@ -495,7 +309,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
if (!next_ste) {
/* One and only entry in the list */
- dr_ste_remove_head_ste(ste, nic_matcher,
+ dr_ste_remove_head_ste(ste_ctx, ste,
+ nic_matcher,
&ste_info_head,
&send_ste_list,
stats_tbl);
@@ -506,7 +321,9 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
put_on_origin_table = false;
}
} else { /* Ste in the middle of the list */
- dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+ dr_ste_remove_middle_ste(ste_ctx, ste,
+ &ste_info_head, &send_ste_list,
+ stats_tbl);
}
/* Update HW */
@@ -530,34 +347,18 @@ bool mlx5dr_ste_equal_tag(void *src, void *dst)
return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
-}
-
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
-{
- u64 index = miss_addr >> 6;
-
- /* The miss address for TX and RX STEs is located at the same offsets */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
-}
-
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
-{
- u8 *hw_ste = ste->hw_ste;
-
- MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
- mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
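
The removed mlx5dr_ste_set_miss_addr() above also documents the wire
encoding now hidden behind the context: ICM addresses are 64B aligned,
so only bits 6 and up are stored, split across two fields. A worked
example:

    /*   miss_addr          = 0x1_0000_0040  (64B-aligned ICM addr)
     *   index              = miss_addr >> 6 = 0x400_0001
     *   miss_address_31_6  = index (low 26 bits) = 0x1
     *   miss_address_39_32 = index >> 26         = 0x1
     *
     * get_miss_addr() reverses it:
     *   (miss_address_31_6 | miss_address_39_32 << 26) << 6
     */
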
/* Init one ste as a pattern for ste data array */
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -565,13 +366,13 @@ void mlx5dr_ste_set_formatted_ste(u16 gvmi,
{
struct mlx5dr_ste ste = {};
- mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
else
- mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -582,7 +383,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
{
u8 formatted_ste[DR_STE_SIZE] = {};
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
htbl,
formatted_ste,
@@ -597,18 +399,18 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
u8 *cur_hw_ste,
enum mlx5dr_icm_chunk_size log_table_size)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u8 next_lu_type;
+ u16 next_lu_type;
u16 byte_mask;
- next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
- byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+ next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
+ byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
@@ -628,7 +430,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
goto free_table;
}
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
+ cur_hw_ste, next_htbl);
ste->next_htbl = next_htbl;
next_htbl->pointing_ste = ste;
}
@@ -657,7 +460,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask)
+ u16 lu_type, u16 byte_mask)
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
@@ -709,6 +512,92 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
return 0;
}
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
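
These wrappers only forward into the per-format context. The
mlx5dr_ste_ctx definition lives in the new dr_ste.h, which is not part
of this excerpt; judging by the call sites in this patch, it is
roughly a table of callbacks:

    /* Shape sketch only, inferred from usage in this diff */
    struct mlx5dr_ste_ctx {
        /* Builders */
        void (*build_eth_l2_src_dst_init)(struct mlx5dr_ste_build *sb,
                                          struct mlx5dr_match_param *mask);
        /* ... one init callback per STE builder ... */

        /* Getters and setters */
        void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
                         u8 entry_type, u16 gvmi);
        void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
        u16  (*get_next_lu_type)(u8 *hw_ste_p);
        void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
        u64  (*get_miss_addr)(u8 *hw_ste_p);
        void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
        void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
        u16  (*get_byte_mask)(u8 *hw_ste_p);

        /* Actions */
        void (*set_actions_rx)(struct mlx5dr_domain *dmn,
                               u8 *action_type_set, u8 *hw_ste_arr,
                               struct mlx5dr_ste_actions_attr *attr,
                               u32 *added_stes);
        void (*set_actions_tx)(struct mlx5dr_domain *dmn,
                               u8 *action_type_set, u8 *hw_ste_arr,
                               struct mlx5dr_ste_actions_attr *attr,
                               u32 *added_stes);
        const struct mlx5dr_ste_action_modify_field *modify_field_arr;
        size_t modify_field_arr_sz;
    };
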
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
+{
+ const struct mlx5dr_ste_action_modify_field *hw_field;
+
+ if (sw_field >= ste_ctx->modify_field_arr_sz)
+ return NULL;
+
+ hw_field = &ste_ctx->modify_field_arr[sw_field];
+ if (!hw_field->end && !hw_field->start)
+ return NULL;
+
+ return hw_field;
+}
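
The lookup guards both the array bound and "holes" in the designated
initializer table (entries whose start and end are both zero were
never populated). A hypothetical caller:

    const struct mlx5dr_ste_action_modify_field *info;

    /* MLX5_ACTION_IN_FIELD_OUT_IP_TTL is one of the SW fields
     * from the conversion table removed earlier in this patch.
     */
    info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx,
                                               MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
    if (!info)
        return -EINVAL;    /* field unsupported by this STE format */
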
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_set((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_add((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ ste_ctx->set_action_copy((u8 *)hw_action,
+ dst_hw_field, dst_shifter, dst_len,
+ src_hw_field, src_shifter);
+}
+
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ /* Only Ethernet frames are supported, with VLAN (18 bytes) or without (14) */
+ if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
+ return -EINVAL;
+
+ return ste_ctx->set_action_decap_l3_list(data, data_sz,
+ hw_action, hw_action_sz,
+ used_hw_action_num);
+}
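
The two accepted sizes follow directly from the Ethernet layout:

    /* Accepted inline L2 header lengths (illustration):
     *   HDR_LEN_L2        = 14 = dmac(6) + smac(6) + ethertype(2)
     *   HDR_LEN_L2_W_VLAN = 18 = 14 + 802.1Q tag(4)
     */
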
+
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
@@ -738,6 +627,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
int ret, i;
@@ -748,14 +638,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
sb = nic_matcher->ste_builder;
for (i = 0; i < nic_matcher->num_of_builders; i++) {
- mlx5dr_ste_init(ste_arr,
- sb->lu_type,
- nic_dmn->ste_type,
- dmn->info.caps.gvmi);
+ ste_ctx->ste_init(ste_arr,
+ sb->lu_type,
+ nic_dmn->ste_type,
+ dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
- ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
+ ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -765,45 +655,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
* not relevant for the last ste in the chain.
*/
sb++;
- MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
- MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+ ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
+ ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
}
ste_arr += DR_STE_SIZE;
}
return 0;
}
-static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- if (mask->smac_47_16 || mask->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
- mask->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
- mask->smac_47_16 << 16 | mask->smac_15_0);
- mask->smac_47_16 = 0;
- mask->smac_15_0 = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
-
- if (mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- } else if (mask->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->svlan_tag = 0;
- }
-}
-
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
@@ -1045,566 +904,93 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
}
-static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- if (spec->smac_47_16 || spec->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
- spec->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
- spec->smac_47_16 << 16 | spec->smac_15_0);
- spec->smac_47_16 = 0;
- spec->smac_15_0 = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
-}
-
-static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
-
- return 0;
+ ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
-}
-
-static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
- bool inner,
- u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_address, mask, dst_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_address, mask, src_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- ecn, mask, ip_ecn);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
- mask->tcp_flags = 0;
- }
-}
-
-static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
-}
-
-static void
-dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_mask->inner_second_cvlan_tag ||
- misc_mask->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->inner_second_cvlan_tag = 0;
- misc_mask->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, inner_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, inner_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, inner_second_prio);
- } else {
- if (misc_mask->outer_second_cvlan_tag ||
- misc_mask->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->outer_second_cvlan_tag = 0;
- misc_mask->outer_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, outer_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, outer_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, outer_second_prio);
- }
-}
-
-static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
- bool inner, u8 *tag)
-{
- struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_spec = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_spec->inner_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->inner_second_cvlan_tag = 0;
- } else if (misc_spec->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
- } else {
- if (misc_spec->outer_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->outer_second_cvlan_tag = 0;
- } else if (misc_spec->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->outer_second_svlan_tag = 0;
- }
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
- }
-
- return 0;
-}
-
-static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
- DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+ ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
-}
-
-static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+ ste_ctx->build_eth_l2_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
-}
-
-static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask,
- l2_tunneling_network_id, (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
+ ste_ctx->build_eth_l2_dst_init(sb, mask);
}
-static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
- (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, bool inner, bool rx)
{
- dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
-}
-
-static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
-
- return 0;
+ ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
-}
-
-static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
- mask->tcp_flags = 0;
- }
+ ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
-static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
- DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+ ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
@@ -1622,653 +1008,110 @@ void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}
-static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (inner)
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
- else
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
-}
-
-static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (sb->inner)
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
- else
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
-
- return 0;
-}
-
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
-}
-
-static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+ ste_ctx->build_mpls_init(sb, mask);
}
-static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
-
- DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
- DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
- DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
-
- DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
-
- DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask, bool inner, bool rx)
-{
- dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_gre_tag;
-}
-
-static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
-}
-
-static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
- return 0;
+ ste_ctx->build_tnl_gre_init(sb, mask);
}
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
-}
-
-#define ICMP_TYPE_OFFSET_FIRST_DW 24
-#define ICMP_CODE_OFFSET_FIRST_DW 16
-#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
-
-static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- u8 *bit_mask)
-{
- bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
- struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
- u32 icmp_header_data_mask;
- u32 icmp_type_mask;
- u32 icmp_code_mask;
- int dw0_location;
- int dw1_location;
-
- if (is_ipv4_mask) {
- icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
- icmp_type_mask = misc_3_mask->icmpv4_type;
- icmp_code_mask = misc_3_mask->icmpv4_code;
- dw0_location = caps->flex_parser_id_icmp_dw0;
- dw1_location = caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
- icmp_type_mask = misc_3_mask->icmpv6_type;
- icmp_code_mask = misc_3_mask->icmpv6_code;
- dw0_location = caps->flex_parser_id_icmpv6_dw0;
- dw1_location = caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_type = 0;
- else
- misc_3_mask->icmpv6_type = 0;
- }
- if (icmp_code_mask) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_code = 0;
- else
- misc_3_mask->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
- (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_header_data = 0;
- else
- misc_3_mask->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ ste_ctx->build_tnl_mpls_init(sb, mask);
}
-static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
- u32 icmp_header_data;
- int dw0_location;
- int dw1_location;
- u32 icmp_type;
- u32 icmp_code;
- bool is_ipv4;
-
- is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
- if (is_ipv4) {
- icmp_header_data = misc_3->icmpv4_header_data;
- icmp_type = misc_3->icmpv4_type;
- icmp_code = misc_3->icmpv4_code;
- dw0_location = sb->caps->flex_parser_id_icmp_dw0;
- dw1_location = sb->caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data = misc_3->icmpv6_header_data;
- icmp_type = misc_3->icmpv6_type;
- icmp_code = misc_3->icmpv6_code;
- dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
- dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_type = 0;
- else
- misc_3->icmpv6_type = 0;
- }
-
- if (icmp_code) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_code = 0;
- else
- misc_3->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
- (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4)
- misc_3->icmpv4_header_data = 0;
- else
- misc_3->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
- int ret;
-
- ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
- if (ret)
- return ret;
-
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
-
- return 0;
-}
-
-static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(general_purpose, bit_mask,
- general_purpose_lookup_field, misc_2_mask,
- metadata_reg_a);
-}
-
-static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
- misc_2_mask, metadata_reg_a);
-
- return 0;
+ return ste_ctx->build_icmp_init(sb, mask);
}
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
-}
-
-static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- if (inner) {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- inner_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- inner_tcp_ack_num);
- } else {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- outer_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- outer_tcp_ack_num);
- }
+ ste_ctx->build_general_purpose_init(sb, mask);
}
-static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- if (sb->inner) {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
- } else {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+ ste_ctx->build_eth_l4_misc_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_flags,
- misc_3_mask, outer_vxlan_gpe_flags);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_next_protocol,
- misc_3_mask, outer_vxlan_gpe_next_protocol);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_vni,
- misc_3_mask, outer_vxlan_gpe_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_flags, misc3,
- outer_vxlan_gpe_flags);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_next_protocol, misc3,
- outer_vxlan_gpe_next_protocol);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_vni, misc3,
- outer_vxlan_gpe_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
- sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+ ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_protocol_type,
- misc_mask, geneve_protocol_type);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_oam,
- misc_mask, geneve_oam);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_opt_len,
- misc_mask, geneve_opt_len);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_vni,
- misc_mask, geneve_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_protocol_type, misc, geneve_protocol_type);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_oam, misc, geneve_oam);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_opt_len, misc, geneve_opt_len);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_vni, misc, geneve_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
+ ste_ctx->build_tnl_geneve_init(sb, mask);
}
-static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
- misc_2_mask, metadata_reg_c_0);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
- misc_2_mask, metadata_reg_c_1);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
- misc_2_mask, metadata_reg_c_2);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
- misc_2_mask, metadata_reg_c_3);
-}
-
-static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
- DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
- DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
- DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
-}
-
-static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
- misc_2_mask, metadata_reg_c_4);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
- misc_2_mask, metadata_reg_c_5);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
- misc_2_mask, metadata_reg_c_6);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
- misc_2_mask, metadata_reg_c_7);
+ ste_ctx->build_register_0_init(sb, mask);
}
-static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
- DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
- DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
- DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+ ste_ctx->build_register_1_init(sb, mask);
}
-static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
- misc_mask->source_eswitch_owner_vhca_id = 0;
-}
-
-static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
- struct mlx5dr_cmd_vport_cap *vport_cap;
- struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
- u8 *bit_mask = sb->bit_mask;
- bool source_gvmi_set;
-
- DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
-
- if (sb->vhca_id_valid) {
- /* Find port GVMI based on the eswitch_owner_vhca_id */
- if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
- else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
- dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
- else
- return -EINVAL;
- } else {
- caps = &dmn->info.caps;
- }
-
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
- if (!vport_cap)
- return -EINVAL;
-
- source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
- if (vport_cap->vport_gvmi && source_gvmi_set)
- MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
-
- misc->source_eswitch_owner_vhca_id = 0;
- misc->source_port = 0;
-
- return 0;
-}
-
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
@@ -2276,12 +1119,21 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
- dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->dmn = dmn;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+ ste_ctx->build_src_gvmi_qpn_init(sb, mask);
+}
+
+static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
+ [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
+ [MLX5_STEERING_FORMAT_CONNECTX_6DX] = NULL,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
+{
+ if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return NULL;
+
+ return mlx5dr_ste_ctx_arr[version];
}
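
A caller would resolve the context once per domain from the device's reported steering format; a hedged sketch (the capability field name is an assumption):

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx)
		return -EOPNOTSUPP;	/* e.g. CONNECTX_6DX has no context yet */
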
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
new file mode 100644
index 000000000000..4a3d6a849991
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef _DR_STE_
+#define _DR_STE_
+
+#include "dr_types.h"
+
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define HDR_LEN_L2_MACS 0xC
+#define HDR_LEN_L2_VLAN 0x4
+#define HDR_LEN_L2_ETHER 0x2
+#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
+#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
+
+/* Write a value into an STE tag field and mark the spec field as consumed */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+ if ((spec)->s_fname) { \
+ MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+ (spec)->s_fname = 0; \
+ } \
+} while (0)
+
+/* Copy spec->s_fname into tag->t_fname and mark spec->s_fname as used */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set tag->t_fname to all ones (-1) and mark spec->s_fname as used */
+#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
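
Both helpers funnel through DR_STE_SET_VAL, which writes the tag field only when the spec field is non-zero and then clears the spec field, so later consistency checks can tell that the requested match was consumed. For example, DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid) expands roughly to:

	if (spec->first_vid) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_id, spec->first_vid);
		spec->first_vid = 0;	/* mark the spec field as consumed */
	}
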
+
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
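
The flag bits follow the TCP header layout, FIN in bit 0 up to NS in bit 8, so matching SYN-only packets reduces to one assignment before invoking the macro. Unlike DR_STE_SET_TAG, this macro does not clear the spec field, so callers zero spec->tcp_flags themselves afterwards:

	spec->tcp_flags = BIT(1);			/* SYN */
	DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);	/* tcp_syn = 1, all other flag bits 0 */
	spec->tcp_flags = 0;
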
+
+#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
+ struct mlx5dr_match_misc2 *_mask = mask; \
+ u8 *_tag = tag; \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
+
+enum dr_ste_action_modify_type_l3 {
+ DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
+};
+
+enum dr_ste_action_modify_type_l4 {
+ DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
+};
+
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
+
+#define DR_STE_CTX_BUILDER(fname) \
+ ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
+ struct mlx5dr_match_param *mask))
+
+struct mlx5dr_ste_ctx {
+ /* Builders */
+ void DR_STE_CTX_BUILDER(eth_l2_src_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
+ void DR_STE_CTX_BUILDER(eth_l2_src);
+ void DR_STE_CTX_BUILDER(eth_l2_dst);
+ void DR_STE_CTX_BUILDER(eth_l2_tnl);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
+ void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
+ void DR_STE_CTX_BUILDER(mpls);
+ void DR_STE_CTX_BUILDER(tnl_gre);
+ void DR_STE_CTX_BUILDER(tnl_mpls);
+ int DR_STE_CTX_BUILDER(icmp);
+ void DR_STE_CTX_BUILDER(general_purpose);
+ void DR_STE_CTX_BUILDER(eth_l4_misc);
+ void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
+ void DR_STE_CTX_BUILDER(tnl_geneve);
+ void DR_STE_CTX_BUILDER(register_0);
+ void DR_STE_CTX_BUILDER(register_1);
+ void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+
+ /* Getters and Setters */
+ void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi);
+ void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
+ u16 (*get_next_lu_type)(u8 *hw_ste_p);
+ void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
+ u64 (*get_miss_addr)(u8 *hw_ste_p);
+ void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+ void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
+ u16 (*get_byte_mask)(u8 *hw_ste_p);
+
+ /* Actions */
+ void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ u32 modify_field_arr_sz;
+ const struct mlx5dr_ste_action_modify_field *modify_field_arr;
+ void (*set_action_set)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_add)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_copy)(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+ int (*set_action_decap_l3_list)(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+};
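
This structure is the complete per-format surface: supporting a new STE format means filling in one more instance and wiring it into the dispatch array, with no changes to the generic dr_ste.c logic. A hypothetical skeleton (only ste_ctx_v0 exists in this patch; all v1 names are placeholders):

struct mlx5dr_ste_ctx ste_ctx_v1 = {
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	/* ... remaining builders ... */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	/* ... remaining getters/setters and actions ... */
};
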
+
+extern struct mlx5dr_ste_ctx ste_ctx_v0;
+
+#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
new file mode 100644
index 000000000000..b76fdff08890
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -0,0 +1,1640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include "dr_ste.h"
+
+#define SVLAN_ETHERTYPE 0x88a8
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+enum dr_ste_v0_action_tunl {
+ DR_STE_TUNL_ACTION_NONE = 0,
+ DR_STE_TUNL_ACTION_ENABLE = 1,
+ DR_STE_TUNL_ACTION_DECAP = 2,
+ DR_STE_TUNL_ACTION_L3_DECAP = 3,
+ DR_STE_TUNL_ACTION_POP_VLAN = 4,
+};
+
+enum dr_ste_v0_action_type {
+ DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
+ DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
+ DR_STE_ACTION_TYPE_ENCAP = 4,
+};
+
+enum dr_ste_v0_action_mdfy_op {
+ DR_STE_ACTION_MDFY_OP_COPY = 0x1,
+ DR_STE_ACTION_MDFY_OP_SET = 0x2,
+ DR_STE_ACTION_MDFY_OP_ADD = 0x3,
+};
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+ ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
+ (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
+ DR_STE_V0_LU_TYPE_##lookup_type##_O)
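
For example, DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner) resolves, per the enum below, to:

	/* inner -> DR_STE_V0_LU_TYPE_ETHL2_SRC_I (0x09)
	 * rx    -> DR_STE_V0_LU_TYPE_ETHL2_SRC_D (0x1c)
	 * else  -> DR_STE_V0_LU_TYPE_ETHL2_SRC_O (0x08)
	 */
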
+
+enum {
+ DR_STE_V0_LU_TYPE_NOP = 0x00,
+ DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
+ DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_O = 0x06,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_I = 0x07,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_D = 0x1b,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_O = 0x08,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_I = 0x09,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_D = 0x1c,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
+ DR_STE_V0_LU_TYPE_ETHL4_O = 0x13,
+ DR_STE_V0_LU_TYPE_ETHL4_I = 0x14,
+ DR_STE_V0_LU_TYPE_ETHL4_D = 0x21,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_O = 0x2c,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_I = 0x2d,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_D = 0x2e,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_O = 0x15,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_I = 0x24,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_D = 0x25,
+ DR_STE_V0_LU_TYPE_GRE = 0x16,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0 = 0x22,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 = 0x23,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
+ DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum {
+ DR_STE_V0_ACTION_MDFY_FLD_L2_0 = 0,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_1 = 1,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_2 = 2,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_0 = 3,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_1 = 4,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_2 = 5,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_3 = 6,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_4 = 7,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_0 = 8,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_1 = 9,
+ DR_STE_V0_ACTION_MDFY_FLD_MPLS = 10,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0 = 11,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_0 = 12,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_1 = 13,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_2 = 14,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_3 = 15,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_2 = 16,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_0 = 17,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_1 = 18,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_2 = 19,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_3 = 20,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1 = 21,
+ DR_STE_V0_ACTION_MDFY_FLD_METADATA = 22,
+ DR_STE_V0_ACTION_MDFY_FLD_RESERVED = 23,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
+ },
+};
+
+static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
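+/* The miss address is 64-byte aligned, so the low 6 bits are dropped and
+ * address bits 39:32 and 31:6 are stored in two separate STE fields.
+ */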
+static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+ /* Miss address for TX and RX STEs is located at the same offsets */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
+{
+ u64 index =
+ (MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
+ MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32) << 26);
+
+ return index << 6;
+}
+
+static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+ MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
+}
+
+static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, byte_mask);
+}
+
+static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+}
+
+static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
+}
+
+static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
+}
+
+static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
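+/* The hit address is 32-byte aligned; the hash table size is OR'd into the
+ * low bits of the index, and both are packed into the next table base fields.
+ */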
+static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
+}
+
+static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi)
+{
+ dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
+ dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
+ dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+ /* Set GVMI once, it is the same for RX/TX.
+ * Bits 63_48 of the next table base / miss address encode the next GVMI.
+ */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
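+/* The flow tag is written into the qp_list_pointer field with the
+ * DR_STE_ENABLE_FLOW_TAG bit OR'd in.
+ */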
+static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+ DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+ /* This can be used for both rx_steering_mult and sx_transmit */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+ bool go_back)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ DR_STE_ACTION_TYPE_PUSH_VLAN);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+ /* Due to a HW limitation we need to set this bit, otherwise reformat +
+ * push vlan will not work.
+ */
+ if (go_back)
+ dr_ste_v0_set_go_back_bit(hw_ste_p);
+}
+
+static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+ int size, bool encap_l3)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+ /* The hardware expects the size here in words (2 bytes each) */
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_DECAP);
+}
+
+static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_L3_DECAP);
+ MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+ num_of_actions);
+ MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+ re_write_index);
+}
+
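+/* Advance to the next STE in the array and initialize it as a "don't care"
+ * match, so that it is used purely to carry additional actions.
+ */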
+static void dr_ste_v0_arr_init_next(u8 **last_ste,
+ u32 *added_stes,
+ enum mlx5dr_ste_entry_type entry_type,
+ u16 gvmi)
+{
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
+ entry_type, gvmi);
+}
+
+static void
+dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+ /* Make sure the modify header comes before L2 encapsulation,
+ * since modify headers are supported for outer headers only.
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_push_vlan(last_ste,
+ attr->vlans.headers[i],
+ encap);
+ }
+ }
+
+ if (encap) {
+ /* Modify header and encapsulation require different STEs,
+ * since the modify header STE format doesn't support the
+ * encapsulation tunneling_action.
+ */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+ action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_encap(last_ste,
+ attr->reformat_id,
+ attr->reformat_size,
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+ /* Whenever prio_tag_required is enabled, we can be sure that the
+ * previous table (ACL) has already pushed a vlan onto our packet,
+ * and due to a HW limitation we need to set this bit, otherwise
+ * push vlan + reformat will not work.
+ */
+ if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+ dr_ste_v0_set_go_back_bit(last_ste);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void
+dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->decap_actions,
+ attr->decap_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+ dr_ste_v0_set_rx_decap(last_ste);
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i ||
+ action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+ action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_set_rx_pop_vlan(last_ste);
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_MODIFY_PKT,
+ attr->gvmi);
+ else
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
+ }
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void dr_ste_v0_set_action_set(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
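+ /* A destination_length of 0 encodes a full 32-bit write */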
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_add(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
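+ /* A destination_length of 0 encodes a full 32-bit write */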
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_copy(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
+}
+
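+/* Rewriting the L2 header after L3 decap takes at least 5 set actions:
+ * dmac_47_16, smac_47_16, dmac_15_0, ethertype (or vlan) and smac_15_0;
+ * a sixth action is needed when a vlan header is restored.
+ */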
+#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
+
+static int
+dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+ u32 hw_action_num;
+ int required_actions;
+ u32 hdr_fld_4b;
+ u16 hdr_fld_2b;
+ u16 vlan_type;
+ bool vlan;
+
+ vlan = (data_sz != HDR_LEN_L2);
+ hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
+ required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
+
+ if (hw_action_num < required_actions)
+ return -ENOMEM;
+
+ /* dmac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 16);
+ hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
+ hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+ MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* dmac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* ethertype + (optional) vlan */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 32);
+ if (!vlan) {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
+ } else {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+ hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
+ }
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ if (vlan) {
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ }
+
+ *used_hw_action_num = required_actions;
+
+ return 0;
+}
+
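+/* The DR_STE_SET_TAG() helpers copy a SW spec field into the HW tag and
+ * clear the SW field, so any value left in the spec afterwards indicates
+ * an unsupported match criterion.
+ */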
+static void
+dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ if (mask->smac_47_16 || mask->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+ mask->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+ mask->smac_47_16 << 16 | mask->smac_15_0);
+ mask->smac_47_16 = 0;
+ mask->smac_15_0 = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ if (spec->smac_47_16 || spec->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+ spec->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+ spec->smac_47_16 << 16 | spec->smac_15_0);
+ spec->smac_47_16 = 0;
+ spec->smac_15_0 = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
+}
+
+static int
+dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS(mpls, misc2, inner, tag);
+ else
+ DR_STE_SET_MPLS(mpls, misc2, outer, tag);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+ DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+ DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_udp_ttl);
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
+}
+
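+/* Within the first ICMP dword, the type sits at bits 31:24 and the code
+ * at bits 23:16.
+ */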
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+
+static int
+dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+ u32 *icmp_header_data;
+ int dw0_location;
+ int dw1_location;
+ u8 *icmp_type;
+ u8 *icmp_code;
+ bool is_ipv4;
+
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
+ if (is_ipv4) {
+ icmp_header_data = &misc_3->icmpv4_header_data;
+ icmp_type = &misc_3->icmpv4_type;
+ icmp_code = &misc_3->icmpv4_code;
+ dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data = &misc_3->icmpv6_header_data;
+ icmp_type = &misc_3->icmpv6_type;
+ icmp_code = &misc_3->icmpv6_code;
+ dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+ }
+
+ switch (dw0_location) {
+ case 4:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+ (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+
+ *icmp_type = 0;
+ *icmp_code = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+ *icmp_header_data);
+ *icmp_header_data = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ int ret;
+
+ ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc_2, metadata_reg_a);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
+}
+
+static int
+dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
+}
+
+static int
+dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+ misc_mask->source_eswitch_owner_vhca_id = 0;
+}
+
+static int
+dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
+ bool source_gvmi_set;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+ if (sb->vhca_id_valid) {
+ /* Find port GVMI based on the eswitch_owner_vhca_id */
+ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ caps = &dmn->info.caps;
+ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+ dmn->peer_dmn->info.caps.gvmi))
+ caps = &dmn->peer_dmn->info.caps;
+ else
+ return -EINVAL;
+
+ misc->source_eswitch_owner_vhca_id = 0;
+ } else {
+ caps = &dmn->info.caps;
+ }
+
+ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
+ if (source_gvmi_set) {
+ vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ if (!vport_cap) {
+ mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ misc->source_port);
+ return -EINVAL;
+ }
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
+}
+
+struct mlx5dr_ste_ctx ste_ctx_v0 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v0_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v0_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v0_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v0_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
+ .build_icmp_init = &dr_ste_v0_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+ .build_register_0_init = &dr_ste_v0_build_register_0_init,
+ .build_register_1_init = &dr_ste_v0_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v0_init,
+ .set_next_lu_type = &dr_ste_v0_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v0_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v0_set_miss_addr,
+ .get_miss_addr = &dr_ste_v0_get_miss_addr,
+ .set_hit_addr = &dr_ste_v0_set_hit_addr,
+ .set_byte_mask = &dr_ste_v0_set_byte_mask,
+ .get_byte_mask = &dr_ste_v0_get_byte_mask,
+
+ /* Actions */
+ .set_actions_rx = &dr_ste_v0_set_actions_rx,
+ .set_actions_tx = &dr_ste_v0_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v0_action_modify_field_arr,
+ .set_action_set = &dr_ste_v0_set_action_set,
+ .set_action_add = &dr_ste_v0_set_action_add,
+ .set_action_copy = &dr_ste_v0_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
+};
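+
+/* Usage sketch (illustrative, not part of the v0 ops themselves): callers
+ * are expected to resolve the STE context once per domain and go through
+ * its ops instead of calling version-specific helpers directly, roughly:
+ *
+ *	struct mlx5dr_ste_ctx *ste_ctx = mlx5dr_ste_get_ctx(version);
+ *
+ *	ste_ctx->ste_init(hw_ste, lu_type, entry_type, gvmi);
+ *	ste_ctx->set_miss_addr(hw_ste, miss_addr);
+ *	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
+ *
+ * where "version" is the device steering format version (an assumption
+ * here; the exact capability field is defined elsewhere).
+ */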
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 51880df26724..8d2c3b6e2755 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;
+struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
u8 *hw_ste;
@@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
};
struct mlx5dr_ste_htbl {
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
@@ -190,7 +191,7 @@ struct mlx5dr_ste_build {
u8 vhca_id_valid:1;
struct mlx5dr_domain *dmn;
struct mlx5dr_cmd_caps *caps;
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
@@ -201,7 +202,7 @@ struct mlx5dr_ste_build {
struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask);
+ u16 lu_type, u16 byte_mask);
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
@@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
-void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
- struct mlx5dr_ste_htbl *next_htbl);
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 miss_addr);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
- int size, bool encap_l3);
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
- bool go_back);
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index);
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+#define MLX5DR_MAX_VLANS 2
+
+struct mlx5dr_ste_actions_attr {
+ u32 modify_index;
+ u16 modify_actions;
+ u32 decap_index;
+ u16 decap_actions;
+ u8 decap_with_vlan:1;
+ u64 final_icm_addr;
+ u32 flow_tag;
+ u32 ctr_id;
+ u16 gvmi;
+ u16 hit_gvmi;
+ u32 reformat_id;
+ u32 reformat_size;
+ struct {
+ int count;
+ u32 headers[MLX5DR_MAX_VLANS];
+ } vlans;
+};
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher);
@@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
return !ste->refcount;
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
- struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
@@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 {
u32 outer_vxlan_gpe_next_protocol:8;
u32 icmpv4_header_data;
u32 icmpv6_header_data;
- u32 icmpv6_code:8;
- u32 icmpv6_type:8;
- u32 icmpv4_code:8;
- u32 icmpv4_type:8;
+ u8 icmpv6_code;
+ u8 icmpv6_type;
+ u8 icmpv4_code;
+ u8 icmpv4_type;
u8 reserved_auto3[0x1c];
};
@@ -671,6 +739,7 @@ struct mlx5dr_domain {
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct mlx5dr_domain_cache cache;
+ struct mlx5dr_ste_ctx *ste_ctx;
};
struct mlx5dr_table_rx_tx {
@@ -725,6 +794,14 @@ struct mlx5dr_rule_member {
struct list_head use_ste_list;
};
+struct mlx5dr_ste_action_modify_field {
+ u16 hw_field;
+ u8 start;
+ u8 end;
+ u8 l3_type;
+ u8 l4_type;
+};
+
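+/* Hedged illustration, not from this patch: a hypothetical entry of the new
+ * mapping type. All values below are made up; the real hw_field/start/end
+ * encodings live in the driver's per-field lookup tables.
+ */
+static const struct mlx5dr_ste_action_modify_field hypothetical_ttl_field = {
+ .hw_field = 3, /* assumed L3 field group, illustrative only */
+ .start = 8, /* first bit of the subfield within hw_field */
+ .end = 15, /* last bit of the subfield */
+ .l3_type = 1, /* assume: IPv4 */
+};
+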
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1000,7 +1077,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_htbl_connect_info *connect_info,
bool update_hw_ste);
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index e01c3766c7de..83df6df6b459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -5,91 +5,6 @@
#define MLX5_IFC_DR_H
enum {
- MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
- MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
- MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
- MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
- MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
- MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
-};
-
-enum {
- MLX5DR_STE_LU_TYPE_NOP = 0x00,
- MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
- MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
- MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
- MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
- MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
- MLX5DR_STE_LU_TYPE_GRE = 0x16,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
- MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 5d9ddf36fb4e..e6f677e42007 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -267,7 +267,7 @@ struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
const void *first_tlv_ptr;
const void *cb_top_ptr;
- mfa2_file = kcalloc(1, sizeof(*mfa2_file), GFP_KERNEL);
+ mfa2_file = kzalloc(sizeof(*mfa2_file), GFP_KERNEL);
if (!mfa2_file)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4eeae8d78006..d0052537e627 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -323,8 +323,8 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
struct pci_dev *pdev = mlxsw_pci->pdev;
dma_addr_t mapaddr;
- mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
- if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
@@ -342,7 +342,7 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
if (!frag_len)
return;
- pci_unmap_single(pdev, mapaddr, frag_len, direction);
+ dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
@@ -858,9 +858,9 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size, &mem_item->mapaddr,
+ GFP_KERNEL);
if (!mem_item->buf)
return -ENOMEM;
@@ -890,8 +890,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err_q_ops_init:
kfree(q->elem_info);
err_elem_info_alloc:
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
return err;
}
@@ -903,8 +903,8 @@ static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
q_ops->fini(mlxsw_pci, q);
kfree(q->elem_info);
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1273,9 +1273,9 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mem_item = &mlxsw_pci->fw_area.items[i];
mem_item->size = MLXSW_PCI_PAGE_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size,
+ &mem_item->mapaddr, GFP_KERNEL);
if (!mem_item->buf) {
err = -ENOMEM;
goto err_alloc;
@@ -1304,8 +1304,8 @@ err_alloc:
for (i--; i >= 0; i--) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
return err;
@@ -1321,8 +1321,8 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
}
@@ -1347,8 +1347,8 @@ static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
int err = 0;
mbox->size = MLXSW_CMD_MBOX_SIZE;
- mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
- &mbox->mapaddr);
+ mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr, GFP_KERNEL);
if (!mbox->buf) {
dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
err = -ENOMEM;
@@ -1362,8 +1362,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
{
struct pci_dev *pdev = mlxsw_pci->pdev;
- pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
- mbox->mapaddr);
+ dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
}
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
@@ -1848,17 +1848,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index cea42f6ed89b..20c4f3c2cf23 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -527,7 +527,6 @@ mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u8 state)
{
@@ -535,9 +534,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -659,7 +655,6 @@ err_port_bridge_vlan_learning_set:
static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
*mlxsw_sp_port,
- struct switchdev_trans *trans,
unsigned long brport_flags)
{
if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
@@ -669,16 +664,12 @@ static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
unsigned long brport_flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
@@ -724,35 +715,26 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
}
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
- ageing_time > MLXSW_SP_MAX_AGEING_TIME)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -765,16 +747,12 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u16 vlan_proto)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -784,16 +762,12 @@ static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_p
}
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_port_mrouter)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
@@ -825,7 +799,6 @@ static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool mc_disabled)
{
@@ -834,9 +807,6 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -896,16 +866,12 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_mrouter)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -921,54 +887,52 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
- trans,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
- err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_protocol);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
- err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mc_disabled);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
- err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
@@ -977,8 +941,7 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
}
- if (switchdev_trans_ph_commit(trans))
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
@@ -1211,23 +1174,20 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_port_vlan *vlan)
{
u16 pvid;
- u16 vid;
pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
if (!pvid)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- if (vid != pvid) {
- netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
- return -EBUSY;
- }
- } else {
- if (vid == pvid) {
- netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
- return -EBUSY;
- }
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vlan->vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vlan->vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
}
}
@@ -1236,7 +1196,6 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1244,14 +1203,12 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev) &&
- switchdev_trans_ph_prepare(trans))
+ br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -1259,9 +1216,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1269,17 +1223,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
@@ -1716,8 +1662,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -1729,9 +1674,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
return 0;
@@ -1813,7 +1755,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -1823,22 +1764,19 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
- extack);
- if (switchdev_trans_ph_prepare(trans)) {
- /* The event is emitted before the changes are actually
- * applied to the bridge. Therefore schedule the respin
- * call for later, so that the respin logic sees the
- * updated bridge state.
- */
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
- }
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
+
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1873,7 +1811,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev))
return -EOPNOTSUPP;
@@ -1885,8 +1822,7 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
return 0;
}
@@ -3406,12 +3342,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct switchdev_trans *trans = port_obj_info->trans;
struct mlxsw_sp_bridge_device *bridge_device;
struct netlink_ext_ack *extack;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
br_dev = netdev_master_upper_dev_get(vxlan_dev);
@@ -3424,9 +3358,6 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
port_obj_info->handled = true;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return -EINVAL;
@@ -3434,18 +3365,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
- vxlan_dev, vid,
- flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vlan->vid,
+ flag_untagged,
+ flag_pvid, extack);
}
static void
@@ -3458,7 +3381,6 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
br_dev = netdev_master_upper_dev_get(vxlan_dev);
if (!br_dev)
@@ -3477,9 +3399,8 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
- vxlan_dev, vid);
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
+ vlan->vid);
}
static int
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 42bc014136fe..93df3049cdc0 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -31,6 +31,8 @@ config KS8851
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
SPI driver for Micrel KS8851 SPI attached network chip.
@@ -40,6 +42,8 @@ config KS8851_MLL
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 2b319e451121..e2eb0caeac82 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -358,6 +358,7 @@ union ks8851_tx_hdr {
* @vdd_reg: Optional regulator supplying the chip
* @vdd_io: Optional digital power supply for IO
* @gpio: Optional reset_n gpio
+ * @mii_bus: Pointer to MII bus structure
* @lock: Bus access lock callback
* @unlock: Bus access unlock callback
* @rdreg16: 16bit register read callback
@@ -403,6 +404,7 @@ struct ks8851_net {
struct regulator *vdd_reg;
struct regulator *vdd_io;
int gpio;
+ struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
unsigned long *flags);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 6fc7483aea03..2feed6ce19d3 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -23,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "ks8851.h"
@@ -932,7 +931,25 @@ static int ks8851_phy_reg(int reg)
return KS_P1ANLPR;
}
- return 0x0;
+ return -EOPNOTSUPP;
+}
+
+static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ int result;
+ int ksreg;
+
+ ksreg = ks8851_phy_reg(reg);
+ if (ksreg < 0)
+ return ksreg;
+
+ ks8851_lock(ks, &flags);
+ result = ks8851_rdreg16(ks, ksreg);
+ ks8851_unlock(ks, &flags);
+
+ return result;
}
/**
@@ -952,20 +969,13 @@ static int ks8851_phy_reg(int reg)
*/
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
- struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
- int ksreg;
- int result;
+ int ret;
- ksreg = ks8851_phy_reg(reg);
- if (!ksreg)
+ ret = ks8851_phy_read_common(dev, phy_addr, reg);
+ if (ret < 0)
return 0x0; /* no error return allowed, so use zero */
- ks8851_lock(ks, &flags);
- result = ks8851_rdreg16(ks, ksreg);
- ks8851_unlock(ks, &flags);
-
- return result;
+ return ret;
}
static void ks8851_phy_write(struct net_device *dev,
@@ -976,13 +986,37 @@ static void ks8851_phy_write(struct net_device *dev,
int ksreg;
ksreg = ks8851_phy_reg(reg);
- if (ksreg) {
+ if (ksreg >= 0) {
ks8851_lock(ks, &flags);
ks8851_wrreg16(ks, ksreg, value);
ks8851_unlock(ks, &flags);
}
}
+static int ks8851_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ if (phy_id != 0)
+ return -EOPNOTSUPP;
+
+ /* KS8851 PHY ID registers are swapped in HW, swap them back. */
+ if (reg == MII_PHYSID1)
+ reg = MII_PHYSID2;
+ else if (reg == MII_PHYSID2)
+ reg = MII_PHYSID1;
+
+ return ks8851_phy_read_common(ks->netdev, phy_id, reg);
+}
+
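+/* Hedged note: because the chip stores the two halves of the PHY ID in
+ * swapped registers, the remap above lets phylib's generic ID probe read
+ * MII_PHYSID1/MII_PHYSID2 in the standard order, so the Micrel PHY driver
+ * selected in Kconfig can match the device.
+ */
+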
+static int ks8851_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ ks8851_phy_write(ks->netdev, phy_id, reg, val);
+ return 0;
+}
+
/**
* ks8851_read_selftest - read the selftest memory info.
* @ks: The device state
@@ -1046,6 +1080,42 @@ int ks8851_resume(struct device *dev)
}
#endif
+static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "ks8851_eth_mii";
+ mii_bus->read = ks8851_mdio_read;
+ mii_bus->write = ks8851_mdio_write;
+ mii_bus->priv = ks;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)BIT(0));
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = mdiobus_register(mii_bus);
+ if (ret)
+ goto err_mdiobus_register;
+
+ ks->mii_bus = mii_bus;
+
+ return 0;
+
+err_mdiobus_register:
+ mdiobus_free(mii_bus);
+ return ret;
+}
+
+static void ks8851_unregister_mdiobus(struct ks8851_net *ks)
+{
+ mdiobus_unregister(ks->mii_bus);
+ mdiobus_free(ks->mii_bus);
+}
+
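+/* Hedged sketch, not part of this patch: with the MDIO bus registered, a
+ * phylib consumer could attach to the single PHY at address 0 roughly as
+ * below; ks8851_adjust_link is a hypothetical link-change callback.
+ */
+static int ks8851_connect_phy_sketch(struct ks8851_net *ks)
+{
+ struct phy_device *phydev;
+
+ phydev = phy_find_first(ks->mii_bus);
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_connect_direct(ks->netdev, phydev, ks8851_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+}
+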
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en)
{
@@ -1104,6 +1174,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+ SET_NETDEV_DEV(netdev, dev);
+
/* setup EEPROM state */
ks->eeprom.data = ks;
ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
@@ -1120,6 +1192,10 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
dev_info(dev, "message enable is %d\n", msg_en);
+ ret = ks8851_register_mdiobus(ks, dev);
+ if (ret)
+ goto err_mdio;
+
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1128,7 +1204,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
skb_queue_head_init(&ks->txq);
netdev->ethtool_ops = &ks8851_ethtool_ops;
- SET_NETDEV_DEV(netdev, dev);
dev_set_drvdata(dev, ks);
@@ -1156,7 +1231,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "failed to register network device\n");
- goto err_netdev;
+ goto err_id;
}
netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
@@ -1165,8 +1240,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
return 0;
-err_netdev:
err_id:
+ ks8851_unregister_mdiobus(ks);
+err_mdio:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
regulator_disable(ks->vdd_reg);
@@ -1180,6 +1256,8 @@ int ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
+ ks8851_unregister_mdiobus(priv);
+
if (netif_msg_drv(priv))
dev_info(dev, "remove\n");
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 3bab0cb2b1a5..2e8fcce50f9d 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 4ec7f1615977..479406ecbaa3 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 58f94c3d80f9..346bba2730ad 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -6,7 +6,8 @@ mscc_ocelot_switch_lib-y := \
ocelot_police.o \
ocelot_vcap.o \
ocelot_flower.o \
- ocelot_ptp.o
+ ocelot_ptp.o \
+ ocelot_devlink.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_vsc7514.o \
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ff87a0bc089c..5b2c0cea49ea 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -221,25 +221,20 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware, struct switchdev_trans *trans)
+ bool vlan_aware)
{
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_vcap_filter *filter;
u32 val;
- if (switchdev_trans_ph_prepare(trans)) {
- struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
- struct ocelot_vcap_filter *filter;
-
- list_for_each_entry(filter, &block->rules, list) {
- if (filter->ingress_port_mask & BIT(port) &&
- filter->action.vid_replace_ena) {
- dev_err(ocelot->dev,
- "Cannot change VLAN state with vlan modify rules active\n");
- return -EBUSY;
- }
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
}
-
- return 0;
}
ocelot_port->vlan_aware = vlan_aware;
@@ -1192,7 +1187,6 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_vlan pvid = {0}, native_vlan = {0};
- struct switchdev_trans trans;
int ret;
ocelot->bridge_mask &= ~BIT(port);
@@ -1200,13 +1194,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- trans.ph_prepare = true;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
- if (ret)
- return ret;
-
- trans.ph_prepare = false;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ ret = ocelot_port_vlan_filtering(ocelot, port, false);
if (ret)
return ret;
@@ -1379,7 +1367,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
pause_stop);
/* Tail dropping watermarks */
- atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
+ atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
OCELOT_BUFFER_CELL_SZ;
atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
@@ -1492,6 +1480,21 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ANA_PORT_VLAN_CFG, cpu);
}
+static void ocelot_detect_features(struct ocelot *ocelot)
+{
+ int mmgt, eq_ctrl;
+
+ /* For Ocelot, Felix, Seville, Serval etc., SYS:MMGT:MMGT:FREECNT holds
+ * the number of 240-byte free memory words (i.e. 4-cell chunks), not
+ * 192 bytes as the documentation incorrectly says.
+ */
+ mmgt = ocelot_read(ocelot, SYS_MMGT);
+ ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
+
+ eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
+ ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
+}
+
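+/* For example (illustrative numbers only): a FREECNT readout of 1024 words
+ * yields 240 * 1024 = 245760 bytes of packet buffer.
+ */
+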
int ocelot_init(struct ocelot *ocelot)
{
char queue_name[32];
@@ -1534,6 +1537,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 291d39d49c4e..e8621dbc14f7 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -121,13 +121,15 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct phy_device *phy);
-
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+int ocelot_devlink_init(struct ocelot *ocelot);
+void ocelot_devlink_teardown(struct ocelot *ocelot);
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour);
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+extern const struct devlink_ops ocelot_devlink_ops;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
new file mode 100644
index 000000000000..edafbd37d12c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2020-2021 NXP Semiconductors
+ */
+#include <net/devlink.h>
+#include "ocelot.h"
+
+/* The queue system tracks four resource consumptions:
+ * Resource 0: Memory tracked per source port
+ * Resource 1: Frame references tracked per source port
+ * Resource 2: Memory tracked per destination port
+ * Resource 3: Frame references tracked per destination port
+ */
+#define OCELOT_RESOURCE_SZ 256
+#define OCELOT_NUM_RESOURCES 4
+
+#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ)
+#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ)
+
+/* For each resource type there are 4 types of watermarks:
+ * Q_RSRV: reservation per QoS class per port
+ * PRIO_SHR: sharing watermark per QoS class across all ports
+ * P_RSRV: reservation per port
+ * COL_SHR: sharing watermark per color (drop precedence) across all ports
+ */
+#define xxx_Q_RSRV_x 0
+#define xxx_PRIO_SHR_x 216
+#define xxx_P_RSRV_x 224
+#define xxx_COL_SHR_x 254
+
+/* Reservation Watermarks
+ * ----------------------
+ *
+ * For setting up the reserved areas, egress watermarks exist per port and per
+ * QoS class for both ingress and egress.
+ */
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_E
+ */
+#define BUF_Q_RSRV_E(port, prio) \
+ (BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_E
+ */
+#define BUF_P_RSRV_E(port) \
+ (BUF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_I
+ */
+#define BUF_Q_RSRV_I(port, prio) \
+ (BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_I
+ */
+#define BUF_P_RSRV_I(port) \
+ (BUF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_E
+ */
+#define REF_Q_RSRV_E(port, prio) \
+ (REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_E
+ */
+#define REF_P_RSRV_E(port) \
+ (REF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_I
+ */
+#define REF_Q_RSRV_I(port, prio) \
+ (REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_I
+ */
+#define REF_P_RSRV_I(port) \
+ (REF_xxxx_I + xxx_P_RSRV_x + (port))
+
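+/* Illustrative index arithmetic, assuming OCELOT_NUM_TC == 8:
+ * BUF_Q_RSRV_I(2, 5) = 0*256 + 0 + 8*2 + 5 = 21
+ * REF_P_RSRV_E(3) = 3*256 + 224 + 3 = 995
+ * i.e. each of the four resources occupies its own 256-entry window of
+ * QSYS_RES_CFG, with the watermark families at fixed offsets inside it.
+ */
+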
+/* Sharing Watermarks
+ * ------------------
+ *
+ * The shared memory area is shared between all ports.
+ */
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_E
+ */
+#define BUF_PRIO_SHR_E(prio) \
+ (BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_E
+ */
+#define BUF_COL_SHR_E(dp) \
+ (BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_I
+ */
+#define BUF_PRIO_SHR_I(prio) \
+ (BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_I
+ */
+#define BUF_COL_SHR_I(dp) \
+ (BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_E
+ */
+#define REF_PRIO_SHR_E(prio) \
+ (REF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_E
+ */
+#define REF_COL_SHR_E(dp) \
+ (REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_I
+ */
+#define REF_PRIO_SHR_I(prio) \
+ (REF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_I
+ */
+#define REF_COL_SHR_I(dp) \
+ (REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
+static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
+{
+ int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);
+
+ return ocelot->ops->wm_dec(wm);
+}
+
+static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
+{
+ u32 wm = ocelot->ops->wm_enc(val);
+
+ ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
+}
+
+static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
+ u32 *maxuse)
+{
+ int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);
+
+ ocelot->ops->wm_stat(res_stat, inuse, maxuse);
+}
+
+/* The hardware comes out of reset with strange defaults: the sum of all
+ * reservations for frame memory is larger than the total buffer size.
+ * One has to wonder how the reservation watermarks can still guarantee
+ * anything under congestion.
+ * Bring some sense into the hardware by changing the defaults to disable all
+ * reservations and rely only on the sharing watermark for frames with drop
+ * precedence 0. The user can still explicitly request reservations per port
+ * and per port-tc through devlink-sb.
+ */
+static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
+ int port)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
+ }
+
+ ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
+}
+
+/* We want the sharing watermarks to consume all nonreserved resources, for
+ * efficient resource utilization (a single traffic flow should be able to use
+ * up the entire buffer space and frame resources as long as there's no
+ * interference).
+ * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
+ * per color (drop precedence).
+ * The trouble with configuring these sharing watermarks is that:
+ * (1) There's a risk that we overcommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to the max
+ * (b) all 2 per-color sharing watermarks to the max
+ * (2) There's a risk that we undercommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to "max / 8"
+ * (b) all 2 per-color sharing watermarks to "max / 2"
+ * So for Linux, let's just disable the sharing watermarks per traffic class
+ * (setting them to 0 will make them always exceeded), and rely only on the
+ * sharing watermark for drop precedence 0. Frames with drop precedence 1
+ * (set by QoS classification or policing) will still be allowed, but only as
+ * long as the port and port-TC reservations are not exceeded.
+ */
+static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
+ }
+}
+
+static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
+ u32 *buf_rsrv_e)
+{
+ int port, prio;
+
+ *buf_rsrv_i = 0;
+ *buf_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *buf_rsrv_i += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_I(port, prio));
+ *buf_rsrv_e += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_E(port, prio));
+ }
+
+ *buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
+ *buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
+ }
+
+ *buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
+ *buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
+}
+
+static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
+ u32 *ref_rsrv_e)
+{
+ int port, prio;
+
+ *ref_rsrv_i = 0;
+ *ref_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *ref_rsrv_i += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_I(port, prio));
+ *ref_rsrv_e += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_E(port, prio));
+ }
+
+ *ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
+ *ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
+ }
+}
+
+/* Calculate all reservations, then set up the sharing watermark for DP=0 to
+ * consume the remaining resources up to the pool's configured size.
+ */
+static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+ u32 buf_shr_i, buf_shr_e;
+ u32 ref_shr_i, ref_shr_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
+ buf_rsrv_i;
+ buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
+ buf_rsrv_e;
+ ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
+ ref_rsrv_i;
+ ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
+ ref_rsrv_e;
+
+ buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
+ buf_shr_e /= OCELOT_BUFFER_CELL_SZ;
+
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
+}
+
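+/* Worked example with hypothetical numbers, assuming a 60-byte buffer cell:
+ * an ingress buffer pool of 1024080 bytes with 61440 bytes of summed ingress
+ * reservations leaves (1024080 - 61440) / 60 = 16044 cells programmed into
+ * BUF_COL_SHR_I(0).
+ */
+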
+/* Ensure that all reservations can be enforced */
+static int ocelot_watermark_validate(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/* The hardware works like this:
+ *
+ * Frame forwarding decision taken
+ * |
+ * v
+ * +--------------------+--------------------+--------------------+
+ * | | | |
+ * v v v v
+ * Ingress memory Egress memory Ingress frame Egress frame
+ * check check reference check reference check
+ * | | | |
+ * v v v v
+ * BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok
+ *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok|
+ * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok|
+ * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok|
+ * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v v v v v v v v
+ * fail success fail success fail success fail success
+ * | | | | | | | |
+ * v v v v v v v v
+ * +-----+----+ +-----+----+ +-----+----+ +-----+-----+
+ * | | | |
+ * +-------> OR <-------+ +-------> OR <-------+
+ * | |
+ * v v
+ * +----------------> AND <-----------------+
+ * |
+ * v
+ * FIFO drop / accept
+ *
+ * We are modeling each of the 4 parallel lookups as a devlink-sb pool.
+ * At least one (ingress or egress) memory pool and one (ingress or egress)
+ * frame reference pool need to have resources for frame acceptance to succeed.
+ *
+ * The following watermarks are controlled explicitly through devlink-sb:
+ * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
+ * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
+ * The following watermarks are controlled implicitly through devlink-sb:
+ * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
+ * The following watermarks are unused and disabled:
+ * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
+ *
+ * This function overrides the hardware defaults with more sane ones (no
+ * reservations by default, let sharing use all resources) and disables the
+ * unused watermarks.
+ */
+static void ocelot_watermark_init(struct ocelot *ocelot)
+{
+ int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
+ int port;
+
+ ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++)
+ ocelot_disable_reservation_watermarks(ocelot, port);
+
+ ocelot_disable_tc_sharing_watermarks(ocelot);
+ ocelot_setup_sharing_watermarks(ocelot);
+}
+
+/* Pool size and type are fixed up at runtime. We keep this structure only
+ * to look up the cell size multipliers.
+ */
+static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
+ [OCELOT_SB_BUF] = {
+ .cell_size = OCELOT_BUFFER_CELL_SZ,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+ [OCELOT_SB_REF] = {
+ .cell_size = 1,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+};
+
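+/* For instance (hedged example): with the 60-byte buffer cell assumed above,
+ * a devlink threshold of 1200 bytes on the OCELOT_SB_BUF shared buffer is
+ * divided down to 1200 / 60 = 20 cells before being written to QSYS_RES_CFG,
+ * and multiplied back up on read-out.
+ */
+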
+/* Returns the pool size configured through ocelot_sb_pool_set */
+int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ if (sb_index >= OCELOT_SB_NUM)
+ return -ENODEV;
+ if (pool_index >= OCELOT_SB_POOL_NUM)
+ return -ENODEV;
+
+ *pool_info = ocelot_sb_pool[sb_index];
+ pool_info->size = ocelot->pool_size[sb_index][pool_index];
+ if (pool_index)
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
+ else
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_get);
+
+/* The pool size received here configures the total amount of resources used on
+ * ingress (or on egress, depending upon the pool index). The pool size, minus
+ * the values for the port and port-tc reservations, is written into the
+ * COL_SHR(dp=0) sharing watermark.
+ */
+int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ u32 old_pool_size;
+ int err;
+
+ if (sb_index >= OCELOT_SB_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid sb, use 0 for buffers and 1 for frame references");
+ return -ENODEV;
+ }
+ if (pool_index >= OCELOT_SB_POOL_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pool, use 0 for ingress and 1 for egress");
+ return -ENODEV;
+ }
+ if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only static threshold supported");
+ return -EOPNOTSUPP;
+ }
+
+ old_pool_size = ocelot->pool_size[sb_index][pool_index];
+ ocelot->pool_size[sb_index][pool_index] = size;
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot->pool_size[sb_index][pool_index] = old_pool_size;
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_set);
+
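+/* From user space this corresponds to devlink-sb, e.g. (the device handle
+ * below is hypothetical):
+ * devlink sb pool set pci/0000:00:00.5 pool 0 size 1024080 thtype static
+ */
+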
+/* This retrieves the configuration made with ocelot_sb_port_pool_set */
+int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_get);
+
+/* This configures the P_RSRV per-port reserved resource watermark */
+int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_set);
+
+/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
+int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ *p_pool_index = 0;
+ else
+ *p_pool_index = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);
+
+/* This configures the Q_RSRV per-port-tc reserved resource watermark */
+int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+	/* Sanity check: the pool index must match the direction implied
+	 * by the pool type.
+	 */
+ if (pool_index == OCELOT_SB_POOL_ING &&
+ pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
+ return -EINVAL;
+ if (pool_index == OCELOT_SB_POOL_EGR &&
+ pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
+ return -EINVAL;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
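+
+/* Example binding, with hypothetical device and port names:
+ *
+ *   devlink sb tc bind set pci/0000:00:00.5/p0 sb 0 tc 7 \
+ *           type ingress pool 0 th 3000
+ *
+ * reserves about 3000 bytes of packet buffer (rounded down to whole
+ * cells) for traffic class 7 received on port p0.
+ */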
+
+/* The hardware does not support atomic snapshots, so we read out the
+ * occupancy registers individually on demand and keep this op as a
+ * stub.
+ */
+int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_snapshot);
+
+/* The watermark occupancy registers are cleared upon read, so reading
+ * them is all that is needed to reset the recorded maximums.
+ */
+int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
+{
+ u32 inuse, maxuse;
+ int port, prio;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
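+		/* "<=" is deliberate: index num_phys_ports is the CPU
+		 * port module, which has watermarks of its own.
+		 */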
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ case OCELOT_SB_REF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_max_clear);
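+
+/* The occupancy data would typically be consumed like this
+ * (hypothetical device name):
+ *
+ *   devlink sb occupancy snapshot pci/0000:00:00.5
+ *   devlink sb occupancy show pci/0000:00:00.5/p0
+ *   devlink sb occupancy clearmax pci/0000:00:00.5
+ *
+ * snapshot is a no-op here, show reads the registers live, and
+ * clearmax relies on the clear-on-read behavior described above.
+ */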
+
+/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
+int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);
+
+/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
+int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
+
+int ocelot_devlink_sb_register(struct ocelot *ocelot)
+{
+ int err;
+
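+	/* Two shared buffers are exposed: one accounting packet memory
+	 * in bytes, one accounting frame references. Each has a single
+	 * ingress and a single egress pool, with OCELOT_NUM_TC traffic
+	 * classes per direction.
+	 */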
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
+ ocelot->packet_buffer_size, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err)
+ return err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
+ ocelot->num_frame_refs, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err) {
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ return err;
+ }
+
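+	/* By default each direction may use the entire resource, i.e.
+	 * everything is shared and only the reservation watermarks
+	 * provide guaranteed minimums.
+	 */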
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;
+
+ ocelot_watermark_init(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_register);
+
+void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
+{
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 42230f92ca9c..9553eb3e441c 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1,13 +1,190 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
* Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
*/
#include <linux/if_bridge.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+
+static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
+{
+ return devlink_priv(dlp->devlink);
+}
+
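+/* ocelot->devlink_ports is an array indexed by chip port, so the port
+ * number falls out of simple pointer arithmetic.
+ */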
+static int devlink_port_to_port(struct devlink_port *dlp)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+
+ return dlp - ocelot->devlink_ports;
+}
+
+static int ocelot_devlink_sb_pool_get(struct devlink *dl,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index,
+ u16 pool_index, u32 *p_cur,
+ u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int
+ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
+ tc_index, pool_type,
+ p_cur, p_max);
+}
+
+const struct devlink_ops ocelot_devlink_ops = {
+ .sb_pool_get = ocelot_devlink_sb_pool_get,
+ .sb_pool_set = ocelot_devlink_sb_pool_set,
+ .sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
+ .sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
+};
+
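+/* The switch ID reported to devlink reuses base_mac, which is what the
+ * ndo_get_port_parent_id callback removed below used to report, so all
+ * ports keep advertising the same parent switch.
+ */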
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+ int id_len = sizeof(ocelot->base_mac);
+ struct devlink *dl = ocelot->devlink;
+ struct devlink_port_attrs attrs = {};
+
+ memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len);
+ attrs.switch_id.id_len = id_len;
+ attrs.phys.port_number = port;
+ attrs.flavour = flavour;
+
+ devlink_port_attrs_set(dlp, &attrs);
+
+ return devlink_port_register(dl, dlp, port);
+}
+
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+
+ devlink_port_unregister(dlp);
+}
+
+static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return &ocelot->devlink_ports[port];
+}
+
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
@@ -457,7 +634,7 @@ static void ocelot_mact_work(struct work_struct *work)
break;
default:
break;
- };
+ }
kfree(w);
}
@@ -525,20 +702,6 @@ static void ocelot_set_rx_mode(struct net_device *dev)
__dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
}
-static int ocelot_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- int port = priv->chip_port;
- int ret;
-
- ret = snprintf(buf, len, "p%d", port);
- if (ret >= len)
- return -EINVAL;
-
- return 0;
-}
-
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -689,18 +852,6 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
-static int ocelot_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
-
- ppid->id_len = sizeof(ocelot->base_mac);
- memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
-
- return 0;
-}
-
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -727,7 +878,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
.ndo_set_rx_mode = ocelot_set_rx_mode,
- .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
.ndo_fdb_add = ocelot_port_fdb_add,
@@ -736,9 +886,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
- .ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_do_ioctl = ocelot_ioctl,
+ .ndo_get_devlink_port = ocelot_get_devlink_port,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -825,12 +975,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
- struct switchdev_trans *trans,
u8 state)
{
- if (switchdev_trans_ph_prepare(trans))
- return;
-
ocelot_bridge_stp_state_set(ocelot, port, state);
}
@@ -858,8 +1004,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
}
static int ocelot_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
@@ -868,15 +1013,13 @@ static int ocelot_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot, port, trans,
- attr->u.stp_state);
+ ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port_vlan_filtering(ocelot, port,
- attr->u.vlan_filtering, trans);
+ ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
@@ -890,56 +1033,27 @@ static int ocelot_port_attr_set(struct net_device *dev,
}
static int ocelot_port_obj_add_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int ret;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
- if (switchdev_trans_ph_prepare(trans))
- ret = ocelot_vlan_vid_prepare(dev, vid, pvid,
- untagged);
- else
- ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static int ocelot_port_vlan_del_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan)
-{
- int ret;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ret = ocelot_vlan_vid_del(dev, vid);
-
- if (ret)
- return ret;
- }
+ ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged);
+ if (ret)
+ return ret;
- return 0;
+ return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged);
}
static int ocelot_port_obj_add_mdb(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return ocelot_port_mdb_add(ocelot, port, mdb);
}
@@ -956,7 +1070,6 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
static int ocelot_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
int ret = 0;
@@ -964,12 +1077,10 @@ static int ocelot_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_port_obj_add_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
return -EOPNOTSUPP;
@@ -985,8 +1096,8 @@ static int ocelot_port_obj_del(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- ret = ocelot_port_vlan_del_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
+ ret = ocelot_vlan_vid_del(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 9cf2bc5f4289..30a38df08a21 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -517,7 +517,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
- ocelot->shared_queue_sz = 224 * 1024;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
@@ -764,9 +763,25 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
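+
+/* Inverse of ocelot_wm_enc(): bit 8 set means the low 8 bits hold the
+ * watermark in coarse units of 16 cells.
+ */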
+static u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
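+/* The status word packs the current occupancy in bits 23:12 and the
+ * maximum occupancy (cleared on read) in bits 11:0, in cell units.
+ */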
+static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
.port_to_netdev = ocelot_port_to_netdev,
.netdev_to_port = ocelot_netdev_to_port,
};
@@ -1036,6 +1051,14 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
+static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_port_devlink_teardown(ocelot, port);
+}
+
static void mscc_ocelot_release_ports(struct ocelot *ocelot)
{
int port;
@@ -1063,28 +1086,44 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
struct device_node *portnp;
- int err;
+ bool *registered_ports;
+ int port, err;
+ u32 reg;
ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
+ ocelot->devlink_ports = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports,
+ sizeof(*ocelot->devlink_ports),
+ GFP_KERNEL);
+ if (!ocelot->devlink_ports)
+ return -ENOMEM;
+
+ registered_ports = kcalloc(ocelot->num_phys_ports, sizeof(bool),
+ GFP_KERNEL);
+ if (!registered_ports)
+ return -ENOMEM;
+
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ struct devlink_port *dlp;
phy_interface_t phy_mode;
struct phy_device *phy;
struct regmap *target;
struct resource *res;
struct phy *serdes;
char res_name[8];
- u32 port;
- if (of_property_read_u32(portnp, "reg", &port))
+ if (of_property_read_u32(portnp, "reg", &reg))
continue;
+ port = reg;
+
snprintf(res_name, sizeof(res_name), "port%d", port);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -1102,15 +1141,26 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!phy)
continue;
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL);
+ if (err) {
+ of_node_put(portnp);
+ goto out_teardown;
+ }
+
err = ocelot_probe_port(ocelot, port, target, phy);
if (err) {
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
+ registered_ports[port] = true;
+
ocelot_port = ocelot->ports[port];
priv = container_of(ocelot_port, struct ocelot_port_private,
port);
+ dlp = &ocelot->devlink_ports[port];
+ devlink_port_type_eth_set(dlp, priv->dev);
of_get_phy_mode(portnp, &phy_mode);
@@ -1135,7 +1185,8 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_teardown;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -1149,13 +1200,46 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
port);
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
priv->serdes = serdes;
}
+ /* Initialize unused devlink ports at the end */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (registered_ports[port])
+ continue;
+
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_UNUSED);
+ if (err) {
+			while (--port >= 0) {
+				if (registered_ports[port])
+					continue;
+ ocelot_port_devlink_teardown(ocelot, port);
+ }
+
+ goto out_teardown;
+ }
+ }
+
+ kfree(registered_ports);
+
return 0;
+
+out_teardown:
+ /* Unregister the network interfaces */
+ mscc_ocelot_release_ports(ocelot);
+ /* Tear down devlink ports for the registered network interfaces */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (!registered_ports[port])
+ continue;
+
+ ocelot_port_devlink_teardown(ocelot, port);
+ }
+ kfree(registered_ports);
+ return err;
}
static int mscc_ocelot_probe(struct platform_device *pdev)
@@ -1163,6 +1247,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int err, irq_xtr, irq_ptp_rdy;
struct device_node *ports;
+ struct devlink *devlink;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
@@ -1186,10 +1271,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (!np && !pdev->dev.platform_data)
return -ENODEV;
- ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
- if (!ocelot)
+ devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
+ if (!devlink)
return -ENOMEM;
+ ocelot = devlink_priv(devlink);
+ ocelot->devlink = priv_to_devlink(ocelot);
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
@@ -1206,7 +1293,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = NULL;
continue;
}
- return PTR_ERR(target);
+ err = PTR_ERR(target);
+ goto out_free_devlink;
}
ocelot->targets[io_target[i].id] = target;
@@ -1215,24 +1303,25 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
- return PTR_ERR(hsio);
+ err = PTR_ERR(hsio);
+ goto out_free_devlink;
}
ocelot->targets[HSIO] = hsio;
err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
- return err;
+ goto out_free_devlink;
irq_xtr = platform_get_irq_byname(pdev, "xtr");
-	if (irq_xtr < 0)
-		return -ENODEV;
+	if (irq_xtr < 0) {
+		err = irq_xtr;
+		goto out_free_devlink;
+	}
err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
- return err;
+ goto out_free_devlink;
irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
@@ -1241,7 +1330,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
IRQF_ONESHOT, "ptp ready",
ocelot);
if (err)
- return err;
+ goto out_free_devlink;
/* Both the PTP interrupt and the PTP bank are available */
ocelot->ptp = 1;
@@ -1250,7 +1339,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(ocelot->dev, "no ethernet-ports child node found\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_free_devlink;
}
ocelot->num_phys_ports = of_get_child_count(ports);
@@ -1265,10 +1355,18 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- err = mscc_ocelot_init_ports(pdev, ports);
+ err = devlink_register(devlink, ocelot->dev);
if (err)
goto out_ocelot_deinit;
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_ocelot_devlink_unregister;
+
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ goto out_ocelot_release_ports;
+
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
@@ -1288,10 +1386,17 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
return 0;
+out_ocelot_release_ports:
+ mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+out_ocelot_devlink_unregister:
+ devlink_unregister(devlink);
out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
+out_free_devlink:
+ devlink_free(devlink);
return err;
}
@@ -1300,11 +1405,15 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
+ ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
+ devlink_free(ocelot->devlink);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 0a721f6e8676..e31f8fbbc696 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3109,13 +3109,19 @@ mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
return 0;
}
-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
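+	/* BPF_STX | BPF_ATOMIC subsumes the old BPF_XADD encoding, with
+	 * the operation carried in insn.imm; the NFP JIT implements only
+	 * the fetch-less BPF_ADD form, i.e. the old XADD semantics.
+	 */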
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, false);
}
-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, true);
}
@@ -3475,8 +3481,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
- [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
- [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
+ [BPF_STX | BPF_ATOMIC | BPF_W] = mem_atomic4,
+ [BPF_STX | BPF_ATOMIC | BPF_DW] = mem_atomic8,
[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index fac9c6f9e197..d0e17eebddd9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -428,9 +428,9 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
{
- return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}
static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index e92ee510fd52..9d235c0ce46a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -479,7 +479,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
pr_vlog(env, "map writes not supported\n");
return -EOPNOTSUPP;
}
- if (is_mbpf_xadd(meta)) {
+ if (is_mbpf_atomic(meta)) {
err = nfp_bpf_map_mark_used(env, meta, reg,
NFP_MAP_USE_ATOMIC_CNT);
if (err)
@@ -523,12 +523,17 @@ exit_check_ptr:
}
static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
{
const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
+ if (meta->insn.imm != BPF_ADD) {
+ pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+ return -EOPNOTSUPP;
+ }
+
if (dreg->type != PTR_TO_MAP_VALUE) {
pr_vlog(env, "atomic add not to a map value pointer: %d\n",
dreg->type);
@@ -655,8 +660,8 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
if (is_mbpf_store(meta))
return nfp_bpf_check_store(nfp_prog, meta, env);
- if (is_mbpf_xadd(meta))
- return nfp_bpf_check_xadd(nfp_prog, meta, env);
+ if (is_mbpf_atomic(meta))
+ return nfp_bpf_check_atomic(nfp_prog, meta, env);
if (is_mbpf_alu(meta))
return nfp_bpf_check_alu(nfp_prog, meta, env);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f21fb573ea3e..eeb30680b4dc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1822,8 +1822,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
- xdp.rxq = &rx_ring->xdp_rxq;
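+	/* xdp_init_buff()/xdp_prepare_buff() fill in the same fields the
+	 * removed open-coded assignments did: frame_sz, rxq, and the
+	 * data/data_meta/data_end pointers.
+	 */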
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1914,10 +1914,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int dma_off;
int act;
- xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
- xdp.data = orig_data;
- xdp.data_meta = orig_data;
- xdp.data_end = orig_data + pkt_len;
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -3656,8 +3656,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ac4cd5d82e69..162a1ff1e9d2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -979,7 +979,7 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
stats->vlan_inserted++;
}
- if (skb->csum_not_inet)
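+	/* SCTP uses CRC32c rather than the Internet checksum; account
+	 * for those offloads separately.
+	 */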
+ if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
stats->csum++;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 5e9f8ee99800..2fcbcecb41d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -113,7 +113,8 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
return NX_RCODE_INVALID_ARGS;
}
- addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
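+	/* dma_alloc_coherent() returns zeroed memory, so no zalloc
+	 * variant is needed; GFP_KERNEL makes the allocation context
+	 * (previously an implicit GFP_ATOMIC) explicit.
+	 */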
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &md_template_addr, GFP_KERNEL);
if (!addr) {
dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
return -ENOMEM;
@@ -133,7 +134,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
cmd.rsp.cmd, size, cmd.rsp.arg2);
}
- pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+ dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
return 0;
}
@@ -281,14 +282,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
rsp_size =
SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
- addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &hostrq_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
- addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &cardrsp_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL) {
err = -ENOMEM;
goto out_free_rq;
@@ -387,9 +388,10 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
recv_ctx->virt_port = prsp->virt_port;
out_free_rsp:
- pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
}
@@ -429,14 +431,14 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
struct netxen_cmd_args cmd;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
- rq_addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &rq_phys_addr);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
- rsp_addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &rsp_phys_addr);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -491,10 +493,11 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
err = -EIO;
}
- pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
return err;
}
@@ -745,9 +748,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
recv_ctx = &adapter->recv_ctx;
tx_ring = adapter->tx_ring;
- addr = pci_alloc_consistent(pdev,
- sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
- &recv_ctx->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate hw context\n");
return -ENOMEM;
@@ -762,8 +765,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
/* cmd desc ring */
- addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -776,9 +779,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate rds ring [%d]\n",
@@ -797,9 +800,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate sds ring [%d]\n",
@@ -874,19 +877,17 @@ done:
recv_ctx = &adapter->recv_ctx;
if (recv_ctx->hwctx != NULL) {
- pci_free_consistent(adapter->pdev,
- sizeof(struct netxen_ring_ctx) +
- sizeof(uint32_t),
- recv_ctx->hwctx,
- recv_ctx->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ recv_ctx->hwctx, recv_ctx->phys_addr);
recv_ctx->hwctx = NULL;
}
tx_ring = adapter->tx_ring;
if (tx_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
tx_ring->desc_head = NULL;
}
@@ -894,10 +895,10 @@ done:
rds_ring = &recv_ctx->rds_rings[ring];
if (rds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- rds_ring->desc_head,
- rds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
rds_ring->desc_head = NULL;
}
}
@@ -906,10 +907,10 @@ done:
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- sds_ring->desc_head,
- sds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
sds_ring->desc_head = NULL;
}
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 94546ed5f867..08f9477d2ee8 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -102,10 +102,8 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter)
rx_buf = &(rds_ring->rx_buf_arr[i]);
if (rx_buf->state == NETXEN_BUFFER_FREE)
continue;
- pci_unmap_single(adapter->pdev,
- rx_buf->dma,
- rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+ rds_ring->dma_size, DMA_FROM_DEVICE);
if (rx_buf->skb != NULL)
dev_kfree_skb_any(rx_buf->skb);
}
@@ -124,16 +122,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
- pci_unmap_single(adapter->pdev, buffrag->dma,
- buffrag->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
+ buffrag->length, DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
- pci_unmap_page(adapter->pdev, buffrag->dma,
- buffrag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ buffrag->dma, buffrag->length,
+ DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
}
@@ -1250,9 +1248,10 @@ int netxen_init_dummy_dma(struct netxen_adapter *adapter)
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
- adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- &adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr,
+ GFP_KERNEL);
if (adapter->dummy_dma.addr == NULL) {
dev_err(&adapter->pdev->dev,
"ERROR: Could not allocate dummy DMA memory\n");
@@ -1304,10 +1303,10 @@ void netxen_free_dummy_dma(struct netxen_adapter *adapter)
}
if (i) {
- pci_free_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- adapter->dummy_dma.addr,
- adapter->dummy_dma.phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
adapter->dummy_dma.addr = NULL;
} else
dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
@@ -1467,10 +1466,10 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
- dma = pci_map_single(pdev, skb->data,
- rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(pdev, dma)) {
+ if (dma_mapping_error(&pdev->dev, dma)) {
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return 1;
@@ -1491,8 +1490,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
buffer = &rds_ring->rx_buf_arr[index];
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
skb = buffer->skb;
if (!skb)
@@ -1754,13 +1753,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
- pci_unmap_single(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+ DMA_TO_DEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
- pci_unmap_page(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, frag->dma,
+ frag->length, DMA_TO_DEVICE);
frag->dma = 0ULL;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index d258e0ccf946..7e6bac85495d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -243,8 +243,8 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter)
cmask = mask;
}
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ if (dma_set_mask(&pdev->dev, mask) == 0 &&
+ dma_set_coherent_mask(&pdev->dev, cmask) == 0) {
adapter->pci_using_dac = 1;
return 0;
}
@@ -277,13 +277,13 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
mask = DMA_BIT_MASK(32+shift);
- err = pci_set_dma_mask(pdev, mask);
+ err = dma_set_mask(&pdev->dev, mask);
if (err)
goto err_out;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- err = pci_set_consistent_dma_mask(pdev, mask);
+ err = dma_set_coherent_mask(&pdev->dev, mask);
if (err)
goto err_out;
}
@@ -293,8 +293,8 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
return 0;
err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
+ dma_set_mask(&pdev->dev, old_mask);
+ dma_set_coherent_mask(&pdev->dev, old_cmask);
return err;
}
@@ -1978,9 +1978,9 @@ netxen_map_tx_skb(struct pci_dev *pdev,
nr_frags = skb_shinfo(skb)->nr_frags;
nf = &pbuf->frag_array[0];
- map = pci_map_single(pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
+ map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
goto out_err;
nf->dma = map;
@@ -2004,12 +2004,12 @@ netxen_map_tx_skb(struct pci_dev *pdev,
unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
- pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
- pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
nf->dma = 0ULL;
out_err:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index ca0ee29a57b5..70c8d3cd85c0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1090,12 +1090,9 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data_hard_start = page_address(bd->data);
- xdp.data = xdp.data_hard_start + *data_offset;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.rxq = &rxq->xdp_rxq;
- xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
+ xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+ *len, false);
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9cf960a6d007..4bf94797aac5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -663,8 +663,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
@@ -688,8 +686,6 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
};
@@ -707,8 +703,6 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
.ndo_xdp_xmit = qede_xdp_transmit,
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 27740c027681..214e347097a7 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -315,12 +315,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
* buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1802,13 +1801,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
* first buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1943,18 +1941,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
goto invalid_seg_count;
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[i],
- mapaddr),
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[i], mapaddr),
dma_unmap_len(&tx_cb->map[i], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
}
qdev->ndev->stats.tx_packets++;
@@ -2021,10 +2017,9 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
qdev->ndev->stats.rx_bytes += length;
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb->data);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
@@ -2067,10 +2062,9 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb2 = lrg_buf_cb2->skb;
skb_put(skb2, length); /* Just the second buffer length here. */
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb2->data);
skb_checksum_none_assert(skb2);
@@ -2319,9 +2313,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
err);
@@ -2357,11 +2351,11 @@ static int ql_send_map(struct ql3_adapter *qdev,
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- map = pci_map_single(qdev->pdev, oal,
+ map = dma_map_single(&qdev->pdev->dev, oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -2423,24 +2417,24 @@ map_error:
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[seg], mapaddr),
- dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
oal++;
seg++;
}
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
-			 dma_unmap_addr(&tx_cb->map[0], maplen),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return NETDEV_TX_BUSY;
@@ -2525,9 +2519,8 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
wmb();
qdev->req_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->req_q_size,
- &qdev->req_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ &qdev->req_q_phy_addr, GFP_KERNEL);
if ((qdev->req_q_virt_addr == NULL) ||
LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
@@ -2536,16 +2529,14 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
}
qdev->rsp_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->rsp_q_size,
- &qdev->rsp_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr, GFP_KERNEL);
if ((qdev->rsp_q_virt_addr == NULL) ||
LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
netdev_err(qdev->ndev, "rspQ allocation failed\n");
- pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
- qdev->req_q_virt_addr,
- qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
return -ENOMEM;
}
@@ -2561,15 +2552,13 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
return;
}
- pci_free_consistent(qdev->pdev,
- qdev->req_q_size,
- qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
qdev->req_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->rsp_q_size,
- qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
qdev->rsp_q_virt_addr = NULL;
@@ -2593,9 +2582,9 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
return -ENOMEM;
qdev->lrg_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- &qdev->lrg_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
@@ -2613,15 +2602,16 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
qdev->small_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- &qdev->small_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->small_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
- pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
return -ENOMEM;
}
@@ -2638,17 +2628,15 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
return;
}
kfree(qdev->lrg_buf);
- pci_free_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
qdev->lrg_buf_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- qdev->small_buf_q_alloc_virt_addr,
- qdev->small_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
qdev->small_buf_q_virt_addr = NULL;
@@ -2666,9 +2654,9 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
QL_SMALL_BUFFER_SIZE);
qdev->small_buf_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- &qdev->small_buf_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr, GFP_KERNEL);
if (qdev->small_buf_virt_addr == NULL) {
netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
@@ -2701,10 +2689,10 @@ static void ql_free_small_buffers(struct ql3_adapter *qdev)
return;
}
if (qdev->small_buf_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- qdev->small_buf_virt_addr,
- qdev->small_buf_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
qdev->small_buf_virt_addr = NULL;
}
@@ -2719,10 +2707,10 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
lrg_buf_cb = &qdev->lrg_buf[i];
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb, mapaddr),
dma_unmap_len(lrg_buf_cb, maplen),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
break;
@@ -2774,13 +2762,11 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
* buffer
*/
skb_reserve(skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
- skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -2865,8 +2851,8 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
* Network Completion Queue Producer Index Register
*/
qdev->shadow_reg_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->shadow_reg_phy_addr, GFP_KERNEL);
if (qdev->shadow_reg_virt_addr != NULL) {
qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
@@ -2921,10 +2907,9 @@ err_small_buffers:
err_buffer_queues:
ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
return -ENOMEM;
}
@@ -2937,10 +2922,9 @@ static void ql_free_mem_resources(struct ql3_adapter *qdev)
ql_free_buffer_queues(qdev);
ql_free_net_req_rsp_queues(qdev);
if (qdev->shadow_reg_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
qdev->shadow_reg_virt_addr = NULL;
}
}
@@ -3641,18 +3625,15 @@ static void ql_reset_work(struct work_struct *work)
if (tx_cb->skb) {
netdev_printk(KERN_DEBUG, ndev,
"Freeing lost SKB\n");
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[0],
- mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
for (j = 1; j < tx_cb->seg_count; j++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[j],
- mapaddr),
- dma_unmap_len(&tx_cb->map[j],
- maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[j], mapaddr),
+ dma_unmap_len(&tx_cb->map[j], maplen),
+ DMA_TO_DEVICE);
}
dev_kfree_skb(tx_cb->skb);
tx_cb->skb = NULL;
@@ -3784,13 +3765,10 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
pci_using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c2faf96fcade..96b947fde646 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -520,8 +520,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qlcnic_features_check,
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
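Removing .ndo_udp_tunnel_add/.ndo_udp_tunnel_del works because the core now drives UDP tunnel port offload through the udp_tunnel_nic infrastructure: drivers publish a static table via netdev->udp_tunnel_nic_info instead of per-port NDOs. A hedged sketch of that registration, with hypothetical my_* callbacks (not qlcnic's actual ones):

#include <net/udp_tunnel.h>

static int my_set_port(struct net_device *dev, unsigned int table,
		       unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ti->type / ti->port into the NIC; return 0 on success */
	return 0;
}

static int my_unset_port(struct net_device *dev, unsigned int table,
			 unsigned int entry, struct udp_tunnel_info *ti)
{
	/* remove the port from the NIC's offload table */
	return 0;
}

static const struct udp_tunnel_nic_info my_udp_tunnels = {
	.set_port	= my_set_port,
	.unset_port	= my_unset_port,
	.tables		= {
		{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

/* at probe time: netdev->udp_tunnel_nic_info = &my_udp_tunnels; */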
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a569abe7f5ef..fb67d8f797ec 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,6 +28,7 @@
#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
+#include <asm/unaligned.h>
#include <net/ip6_checksum.h>
#include "r8169.h"
@@ -260,6 +261,9 @@ enum rtl8168_8101_registers {
#define CSIAR_BYTE_ENABLE 0x0000f000
#define CSIAR_ADDR_MASK 0x00000fff
PMCH = 0x6f,
+#define D3COLD_NO_PLL_DOWN BIT(7)
+#define D3HOT_NO_PLL_DOWN BIT(6)
+#define D3_NO_PLL_DOWN (BIT(7) | BIT(6))
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
#define EPHYAR_WRITE_CMD 0x80000000
@@ -529,6 +533,9 @@ enum rtl_rx_desc_bit {
IPFail = (1 << 16), /* IP checksum failed */
UDPFail = (1 << 15), /* UDP/IP checksum failed */
TCPFail = (1 << 14), /* TCP/IP checksum failed */
+
+#define RxCSFailMask (IPFail | UDPFail | TCPFail)
+
RxVlanTag = (1 << 16), /* VLAN tag available */
};
@@ -584,6 +591,12 @@ enum rtl_flag {
RTL_FLAG_MAX
};
+enum rtl_dash_type {
+ RTL_DASH_NONE,
+ RTL_DASH_DP,
+ RTL_DASH_EP,
+};
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
@@ -591,6 +604,7 @@ struct rtl8169_private {
struct phy_device *phydev;
struct napi_struct napi;
enum mac_version mac_version;
+ enum rtl_dash_type dash_type;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
@@ -746,14 +760,75 @@ static const struct rtl_cond name = { \
\
static bool name ## _check(struct rtl8169_private *tp)
-static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
- if (reg & 0xffff0001) {
- if (net_ratelimit())
- netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
- return true;
- }
- return false;
+ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
+ *cmd |= 0x7f0 << 18;
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
+}
+
+static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
+{
+ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
+ if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask))
+ return;
+
+ RTL_W32(tp, ERIDR, val);
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+}
+
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val)
+{
+ _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
+}
+
+static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
+{
+ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(tp, ERIDR) : ~0;
+}
+
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
+{
+ return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
+}
+
+static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
+{
+ u32 val = rtl_eri_read(tp, addr);
+
+ rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
+}
+
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
+{
+ rtl_w0w1_eri(tp, addr, p, 0);
+}
+
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
+{
+ rtl_w0w1_eri(tp, addr, 0, m);
+}
+
+static bool rtl_ocp_reg_failure(u32 reg)
+{
+ return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
}
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
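The rewrite of rtl_ocp_reg_failure() leans on WARN_ONCE() returning the value of the condition it tests, so the ratelimited netdev_err() plus separate boolean result collapse into one expression that also logs a backtrace on the first bad register. In isolation:

#include <linux/bug.h>

static bool reg_is_invalid(u32 reg)
{
	/* prints (once) and returns true iff a reserved bit is set */
	return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
}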
@@ -763,7 +838,7 @@ DECLARE_RTL_COND(rtl_ocp_gphy_cond)
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
@@ -773,7 +848,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, GPHY_OCP, reg << 15);
@@ -784,7 +859,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
@@ -792,7 +867,7 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, OCPDR, reg << 15);
@@ -808,6 +883,25 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
+/* Work around a hw issue with the RTL8168g PHY: the quirk disables
+ * PHY MCU interrupts before PHY power-down.
+ */
+static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_49:
+ if (value & BMCR_RESET || !(value & BMCR_PDOWN))
+ rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
+ else
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+ break;
+ default:
+ break;
+ }
+}
+
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -818,6 +912,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
if (tp->ocp_base != OCP_STD_PHY_BASE)
reg -= 0x10;
+ if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR)
+ rtl8168g_phy_suspend_quirk(tp, value);
+
r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
@@ -1009,70 +1106,6 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
-{
- /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
- *cmd |= 0x7f0 << 18;
-}
-
-DECLARE_RTL_COND(rtl_eriar_cond)
-{
- return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
-}
-
-static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val, int type)
-{
- u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
-
- BUG_ON((addr & 3) || (mask == 0));
- RTL_W32(tp, ERIDR, val);
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
-}
-
-static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val)
-{
- _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
-}
-
-static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
-{
- u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
-
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
- RTL_R32(tp, ERIDR) : ~0;
-}
-
-static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
-{
- return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
-}
-
-static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
-{
- u32 val = rtl_eri_read(tp, addr);
-
- rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
-}
-
-static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
-{
- rtl_w0w1_eri(tp, addr, p, 0);
-}
-
-static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
-{
- rtl_w0w1_eri(tp, addr, 0, m);
-}
-
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
{
RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
@@ -1158,19 +1191,10 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_start(tp);
- break;
- default:
- BUG();
- break;
- }
}
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
@@ -1189,44 +1213,51 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_stop(tp);
- break;
- default:
- BUG();
- break;
- }
}
static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
+ return r8168dp_ocp_read(tp, reg) & BIT(15);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
}
-static bool r8168_check_dash(struct rtl8169_private *tp)
+static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- return r8168dp_check_dash(tp);
+ return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
- return r8168ep_check_dash(tp);
+ return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
- return false;
+ return RTL_DASH_NONE;
+ }
+}
+
+static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ if (enable)
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
+ else
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
+ break;
+ default:
+ break;
}
}
@@ -1396,6 +1427,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ rtl_set_d3_pll_down(tp, !wolopts);
tp->dev->wol_enabled = wolopts ? 1 : 0;
}
@@ -1962,7 +1994,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ /* It seems this early RTL8168dp version never made it to
+	 * the wild. Let's see whether somebody complains; if not,
+ * we'll remove support for this chip version completely.
+ * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ */
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
{ 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
@@ -2081,18 +2117,12 @@ static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
-static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
- const u16 w[] = {
- addr[0] | (addr[1] << 8),
- addr[2] | (addr[3] << 8),
- addr[4] | (addr[5] << 8)
- };
-
- rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
- rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
- rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
- rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
+ rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
+ rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4));
+ rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16);
+ rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2));
}
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
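rtl_rar_exgmac_set() now uses the <asm/unaligned.h> accessors (included at the top of this patch) instead of shifting bytes by hand: the 6-byte MAC address is read as one little-endian u32 plus one u16. A standalone sketch with a hypothetical helper name:

#include <asm/unaligned.h>

static void mac_to_words(const u8 *addr, u32 *lo, u32 *hi)
{
	*lo = get_unaligned_le32(addr);	    /* addr[0..3], little-endian */
	*hi = get_unaligned_le16(addr + 4); /* addr[4..5], little-endian */
}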
@@ -2142,14 +2172,14 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
genphy_soft_reset(tp->phydev);
}
-static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_unlock_config_regs(tp);
- RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
+ RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4));
rtl_pci_commit(tp);
- RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
+ RTL_W32(tp, MAC0, get_unaligned_le32(addr));
rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
@@ -2172,28 +2202,16 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+static void rtl_wol_enable_rx(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63:
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
- break;
- default:
- break;
- }
}
-static void rtl_pll_power_down(struct rtl8169_private *tp)
+static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
@@ -2202,66 +2220,10 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
- rtl_wol_suspend_quirk(tp);
- return;
- }
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- default:
- break;
+ rtl_wol_enable_rx(tp);
}
}
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
- break;
- default:
- break;
- }
-
- phy_resume(tp->phydev);
-}
-
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -2340,13 +2302,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
+ int readrq = 4096;
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168b_1_hw_jumbo_enable(tp);
} else {
r8168b_1_hw_jumbo_disable(tp);
@@ -2354,7 +2317,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168c_hw_jumbo_enable(tp);
} else {
r8168c_hw_jumbo_disable(tp);
@@ -2367,20 +2330,18 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ if (jumbo)
r8168e_hw_jumbo_enable(tp);
- } else {
+ else
r8168e_hw_jumbo_disable(tp);
- }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
- pcie_set_readrq(tp->pci_dev, 4096);
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ pcie_set_readrq(tp->pci_dev, readrq);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4408,10 +4369,9 @@ static inline int rtl8169_fragmented_frame(u32 status)
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 status = opts1 & RxProtoMask;
+ u32 status = opts1 & (RxProtoMask | RxCSFailMask);
- if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ if (status == RxProtoTCP || status == RxProtoUDP)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
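The rx_csum change works because the checksum-fail bits are folded into the extracted status: if any of IPFail/UDPFail/TCPFail is set, the masked value can no longer compare equal to the bare protocol code, so a single equality test means "protocol is TCP (or UDP) and its checksum passed". The idiom in generic form, with made-up bit positions:

#include <linux/bits.h>

#define PROTO_MASK	(0x3u << 16)	/* hypothetical */
#define PROTO_TCP	(0x1u << 16)	/* hypothetical */
#define CSUM_FAIL	BIT(14)		/* hypothetical */

static bool rx_csum_ok(u32 opts1)
{
	u32 status = opts1 & (PROTO_MASK | CSUM_FAIL);

	return status == PROTO_TCP;	/* TCP and CSUM_FAIL clear */
}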
@@ -4618,12 +4578,12 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl8169_cleanup(tp, true);
- rtl_pll_power_down(tp);
+ rtl_prepare_power_down(tp);
}
static void rtl8169_up(struct rtl8169_private *tp)
{
- rtl_pll_power_up(tp);
+ phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -4891,11 +4851,11 @@ static void rtl_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF) {
if (tp->saved_wolopts) {
- rtl_wol_suspend_quirk(tp);
+ rtl_wol_enable_rx(tp);
rtl_wol_shutdown_quirk(tp);
}
- pci_wake_from_d3(pdev, true);
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
}
@@ -4909,7 +4869,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(tp->dev);
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
rtl8168_driver_stop(tp);
rtl_release_firmware(tp);
@@ -4977,16 +4937,12 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
{
/* Get MAC address */
if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
- u32 value = rtl_eri_read(tp, 0xe0);
-
- mac_addr[0] = (value >> 0) & 0xff;
- mac_addr[1] = (value >> 8) & 0xff;
- mac_addr[2] = (value >> 16) & 0xff;
- mac_addr[3] = (value >> 24) & 0xff;
+		u32 value = rtl_eri_read(tp, 0xe0);
+
+		put_unaligned_le32(value, mac_addr);
value = rtl_eri_read(tp, 0xe4);
- mac_addr[4] = (value >> 0) & 0xff;
- mac_addr[5] = (value >> 8) & 0xff;
+ put_unaligned_le16(value, mac_addr + 4);
} else if (rtl_is_8125(tp)) {
rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
}
@@ -5267,12 +5223,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
if (chipset == RTL_GIGA_MAC_NONE) {
- dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
+ dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid);
return -ENODEV;
}
tp->mac_version = chipset;
+ tp->dash_type = rtl_check_dash(tp);
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
@@ -5342,6 +5300,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
+ rtl_set_d3_pll_down(tp, true);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5362,9 +5322,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* chip gets powered up in rtl_open() */
- rtl_pll_power_down(tp);
-
rc = register_netdev(dev);
if (rc)
return rc;
@@ -5378,7 +5335,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
"ok" : "ko");
- if (r8168_check_dash(tp)) {
+ if (tp->dash_type != RTL_DASH_NONE) {
netdev_info(dev, "DASH enabled\n");
rtl8168_driver_start(tp);
}
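Taken together, the DASH hunks follow one pattern: classify the hardware once at probe time (tp->dash_type = rtl_check_dash(tp)) and let every later caller branch on the cached enum instead of re-matching mac_version ranges, which also removes the BUG() default cases. The resulting shape, condensed from the hunks above:

/* probe: tp->dash_type = rtl_check_dash(tp); evaluated exactly once */

static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	/* callers guard with dash_type != RTL_DASH_NONE first */
	if (tp->dash_type == RTL_DASH_DP)
		rtl8168dp_driver_stop(tp);
	else
		rtl8168ep_driver_stop(tp);
}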
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 7453b17a37a2..cb47e68c1a3e 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -165,7 +165,7 @@ enum ravb_reg {
GTO2 = 0x03A8,
GIC = 0x03AC,
GIS = 0x03B0,
- GCPT = 0x03B4, /* Undocumented? */
+ GCPT = 0x03B4, /* Documented for R-Car Gen3 only */
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
@@ -225,7 +225,7 @@ enum CSR_BIT {
CSR_OPS_RESET = 0x00000001,
CSR_OPS_CONFIG = 0x00000002,
CSR_OPS_OPERATION = 0x00000004,
- CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_OPS_STANDBY = 0x00000008, /* Documented for R-Car Gen3 only */
CSR_DTS = 0x00000100,
CSR_TPO0 = 0x00010000,
CSR_TPO1 = 0x00020000,
@@ -241,13 +241,12 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
-/* APSR */
+/* APSR (R-Car Gen3 only) */
enum APSR_BIT {
- APSR_MEMS = 0x00000002,
- APSR_CMSW = 0x00000010,
- APSR_DM = 0x00006000, /* Undocumented? */
- APSR_DM_RDM = 0x00002000,
- APSR_DM_TDM = 0x00004000,
+ APSR_MEMS = 0x00000002, /* Undocumented */
+ APSR_CMSW = 0x00000010,
+ APSR_RDM = 0x00002000,
+ APSR_TDM = 0x00004000,
};
/* RCR */
@@ -530,16 +529,16 @@ enum RIS2_BIT {
/* TIC */
enum TIC_BIT {
- TIC_FTE0 = 0x00000001, /* Undocumented? */
- TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_FTE0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIC_FTE1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIC_TFUE = 0x00000100,
TIC_TFWE = 0x00000200,
};
/* TIS */
enum TIS_BIT {
- TIS_FTF0 = 0x00000001, /* Undocumented? */
- TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_FTF0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIS_FTF1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
@@ -547,8 +546,8 @@ enum TIS_BIT {
/* ISS */
enum ISS_BIT {
- ISS_FRS = 0x00000001, /* Undocumented? */
- ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_FRS = 0x00000001, /* Documented for R-Car Gen3 only */
+ ISS_FTS = 0x00000004, /* Documented for R-Car Gen3 only */
ISS_ES = 0x00000040,
ISS_MS = 0x00000080,
ISS_TFUS = 0x00000100,
@@ -608,13 +607,13 @@ enum GTI_BIT {
/* GIC */
enum GIC_BIT {
- GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTCE = 0x00000001, /* Documented for R-Car Gen3 only */
GIC_PTME = 0x00000004,
};
/* GIS */
enum GIS_BIT {
- GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTCF = 0x00000001, /* Documented for R-Car Gen3 only */
GIS_PTMF = 0x00000004,
GIS_RESERVED = GENMASK(15, 10),
};
@@ -808,10 +807,10 @@ enum ECMR_BIT {
ECMR_TE = 0x00000020,
ECMR_RE = 0x00000040,
ECMR_MPDE = 0x00000200,
- ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
@@ -830,7 +829,7 @@ enum ECSR_BIT {
enum ECSIPR_BIT {
ECSIPR_ICDIP = 0x00000001,
ECSIPR_MPDIP = 0x00000002,
- ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+ ECSIPR_LCHNGIP = 0x00000004,
};
/* PIR */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index bd30505fbc57..eb0c03bdb12d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2034,10 +2034,10 @@ static void ravb_set_delay_mode(struct net_device *ndev)
u32 set = 0;
if (priv->rxcidm)
- set |= APSR_DM_RDM;
+ set |= APSR_RDM;
if (priv->txcidm)
- set |= APSR_DM_TDM;
- ravb_modify(ndev, APSR, APSR_DM, set);
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
static int ravb_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 6fad25321dc5..315a6e5c0f59 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -103,15 +103,13 @@ struct rocker_world_ops {
int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
u8 state);
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans);
+ unsigned long brport_flags);
int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
rocker_port,
unsigned long *
p_brport_flags);
int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans);
+ u32 ageing_time);
int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan);
int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index dd0bc7f0aaee..740a715c49c6 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1550,17 +1550,13 @@ static void rocker_world_port_stop(struct rocker_port *rocker_port)
}
static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans)
+ u8 state)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_stp_state_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_attr_stp_state_set(rocker_port, state);
}
@@ -1580,8 +1576,7 @@ rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
unsigned long brport_flags_s;
@@ -1603,52 +1598,37 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
- trans);
+ return wops->port_attr_bridge_flags_set(rocker_port, brport_flags);
}
static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
-
+ u32 ageing_time)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_ageing_time_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
- trans);
+ return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
}
static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_obj_vlan_add(rocker_port, vlan);
}
@@ -2066,8 +2046,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
********************/
static int rocker_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2075,23 +2054,19 @@ static int rocker_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = rocker_world_port_attr_stp_state_set(rocker_port,
- attr->u.stp_state,
- trans);
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
- attr->u.ageing_time,
- trans);
+ attr->u.ageing_time);
break;
default:
err = -EOPNOTSUPP;
@@ -2102,8 +2077,7 @@ static int rocker_port_attr_set(struct net_device *dev,
}
static int rocker_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2111,8 +2085,7 @@ static int rocker_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = rocker_world_port_obj_vlan_add(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -2725,8 +2698,7 @@ rocker_switchdev_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = rocker_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = rocker_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -2847,8 +2819,7 @@ rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = rocker_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = rocker_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = rocker_port_obj_del(netdev, port_obj_info->obj);
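These rocker hunks track the switchdev core dropping its two-phase prepare/commit transaction: attr and obj handlers lose the switchdev_trans argument and the switchdev_trans_ph_prepare() early-outs, and now simply apply the change and return an error. Shape of a handler after the conversion, with a hypothetical my_hw_set_stp() standing in for the device-specific call:

#include <net/switchdev.h>

static int my_hw_set_stp(struct net_device *dev, u8 state)
{
	return 0;	/* program the STP state into hw */
}

static int my_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		return my_hw_set_stp(dev, attr->u.stp_state);
	default:
		return -EOPNOTSUPP;
	}
}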
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7072b249c8bd..d067da1ef070 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2488,8 +2488,7 @@ static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
}
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
unsigned long orig_flags;
@@ -2497,14 +2496,11 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
orig_flags = ofdpa_port->brport_flags;
ofdpa_port->brport_flags = brport_flags;
- if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
- !switchdev_trans_ph_prepare(trans))
+
+ if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
err = rocker_port_set_learning(ofdpa_port->rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
- if (switchdev_trans_ph_prepare(trans))
- ofdpa_port->brport_flags = orig_flags;
-
return err;
}
@@ -2520,18 +2516,15 @@ ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
+ u32 ageing_time)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
- if (!switchdev_trans_ph_prepare(trans)) {
- ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
- if (ofdpa_port->ageing_time < ofdpa->ageing_time)
- ofdpa->ageing_time = ofdpa_port->ageing_time;
- mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
- }
+ ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (ofdpa_port->ageing_time < ofdpa->ageing_time)
+ ofdpa->ageing_time = ofdpa_port->ageing_time;
+ mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
return 0;
}
@@ -2540,32 +2533,16 @@ static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 718308076341..36c8625a6fd7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -612,8 +612,6 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp
};
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index aaa112877561..89c5c75f479f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -293,14 +293,10 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
efx->rx_prefix_size);
- xdp.data = *ehp;
- xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
-
+ xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
/* No support yet for XDP metadata */
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + rx_buf->len;
- xdp.rxq = &rx_queue->xdp_rxq_info;
- xdp.frame_sz = efx->rx_page_buf_step;
+ xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+ rx_buf->len, false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
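xdp_init_buff() and xdp_prepare_buff() are the new central helpers for xdp_buff setup: the first records the per-queue constants (total frame size and the rxq backpointer), the second the per-packet layout. A sketch assuming a page-backed buffer with fixed headroom; everything except the two xdp_* helpers is a placeholder:

#include <net/xdp.h>

static void rx_build_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
			 unsigned char *page_addr, unsigned int headroom,
			 unsigned int pkt_len)
{
	xdp_init_buff(xdp, PAGE_SIZE, rxq);
	/* hard_start, headroom, data length, metadata-valid flag */
	xdp_prepare_buff(xdp, page_addr, headroom, pkt_len, false);
}

The same conversion appears below for netsec and the two cpsw drivers.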
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 742a1f7a838c..891b49281bc6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2191,7 +2191,7 @@ static const struct of_device_id smc91x_match[] = {
MODULE_DEVICE_TABLE(of, smc91x_match);
/**
- * of_try_set_control_gpio - configure a gpio if it exists
+ * try_toggle_control_gpio - configure a gpio if it exists
* @dev: net device
* @desc: where to store the GPIO descriptor, if it exists
* @name: name of the GPIO in DT
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 823d9a7184fe..606c79de93a6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -557,6 +557,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -582,6 +583,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -594,6 +596,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -623,6 +626,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -1589,6 +1593,8 @@ static int smsc911x_open(struct net_device *dev)
int retval;
int irq_flags;
+ pm_runtime_get_sync(dev->dev.parent);
+
/* find and start the given phy */
if (!dev->phydev) {
retval = smsc911x_mii_probe(dev);
@@ -1735,6 +1741,7 @@ mii_free_out:
phy_disconnect(dev->phydev);
dev->phydev = NULL;
out:
+ pm_runtime_put(dev->dev.parent);
return retval;
}
@@ -1766,6 +1773,7 @@ static int smsc911x_stop(struct net_device *dev)
dev->phydev = NULL;
}
netif_carrier_off(dev);
+ pm_runtime_put(dev->dev.parent);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -2334,7 +2342,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -2540,6 +2547,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
}
spin_unlock_irq(&pdata->mac_lock);
+ pm_runtime_put(&pdev->dev);
netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 19d20a6d0d44..3c53051bdacf 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -956,8 +956,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u32 xdp_act = 0;
int done = 0;
- xdp.rxq = &dring->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE;
+ xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
@@ -1016,10 +1015,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
dma_dir);
prefetch(desc->addr);
- xdp.data_hard_start = desc->addr;
- xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + pkt_len;
+ xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
+ pkt_len, false);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index f184b00f5116..55152d7ba99a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -68,10 +68,21 @@
*/
#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+#define PRG_ETH1 0x4
+
+/* Defined for adding a delay to the input RX_CLK for better timing.
+ * Each step is 200ps. These bits are used with external RGMII PHYs
+ * because the RGMII RX timing window is small. cfg_rxclk_dly can
+ * adjust the window between RX_CLK and RX_DATA and improve the stability
+ * of "rx data valid".
+ */
+#define PRG_ETH1_CFG_RXCLK_DLY GENMASK(19, 16)
+
struct meson8b_dwmac;
struct meson8b_dwmac_data {
int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+ bool has_prg_eth1_rgmii_rx_delay;
};
struct meson8b_dwmac {
@@ -82,7 +93,7 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
- u32 rx_delay_ns;
+ u32 rx_delay_ps;
struct clk *timing_adj_clk;
};
@@ -268,32 +279,37 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
return 0;
}
-static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
{
- u32 tx_dly_config, rx_dly_config, delay_config;
+ u32 tx_dly_config, rx_adj_config, cfg_rxclk_dly, delay_config;
int ret;
+ rx_adj_config = 0;
+ cfg_rxclk_dly = 0;
tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
dwmac->tx_delay_ns >> 1);
- if (dwmac->rx_delay_ns == 2)
- rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
- else
- rx_dly_config = 0;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay)
+ cfg_rxclk_dly = FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY,
+ dwmac->rx_delay_ps / 200);
+ else if (dwmac->rx_delay_ps == 2000)
+ rx_adj_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
- delay_config = tx_dly_config | rx_dly_config;
+ delay_config = tx_dly_config | rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
delay_config = tx_dly_config;
+ cfg_rxclk_dly = 0;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- delay_config = rx_dly_config;
+ delay_config = rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RMII:
delay_config = 0;
+ cfg_rxclk_dly = 0;
break;
default:
dev_err(dwmac->dev, "unsupported phy-mode %s\n",
@@ -301,7 +317,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (delay_config & PRG_ETH0_ADJ_ENABLE) {
if (!dwmac->timing_adj_clk) {
dev_err(dwmac->dev,
"The timing-adjustment clock is mandatory for the RX delay re-timing\n");
@@ -323,6 +339,16 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
delay_config);
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH1, PRG_ETH1_CFG_RXCLK_DLY,
+ cfg_rxclk_dly);
+
+ return 0;
+}
+
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+
if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
@@ -406,16 +432,30 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- /* use 0ns as fallback since this is what most boards actually use */
- if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
- &dwmac->rx_delay_ns))
- dwmac->rx_delay_ns = 0;
+ /* RX delay defaults to 0ps since this is what many boards use */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-internal-delay-ps",
+ &dwmac->rx_delay_ps)) {
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ps))
+ /* convert ns to ps */
+ dwmac->rx_delay_ps *= 1000;
+ }
- if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
- dev_err(&pdev->dev,
- "The only allowed RX delays values are: 0ns, 2ns");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay) {
+ if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
+ dev_err(dwmac->dev,
+ "The RGMII RX delay range is 0..3000ps in 200ps steps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ } else {
+ if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
+ dev_err(dwmac->dev,
+ "The only allowed RGMII RX delay values are: 0ps, 2000ps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
}
dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
@@ -425,6 +465,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ ret = meson8b_init_rgmii_delays(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -453,10 +497,17 @@ err_remove_config_dt:
static const struct meson8b_dwmac_data meson8b_dwmac_data = {
.set_phy_mode = meson8b_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
};
static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
.set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
+};
+
+static const struct meson8b_dwmac_data meson_g12a_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = true,
};
static const struct of_device_id meson8b_dwmac_match[] = {
@@ -478,7 +529,7 @@ static const struct of_device_id meson8b_dwmac_match[] = {
},
{
.compatible = "amlogic,meson-g12a-dwmac",
- .data = &meson_axg_dwmac_data,
+ .data = &meson_g12a_dwmac_data,
},
{ }
};
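The probe change reads the generic rx-internal-delay-ps binding first and only falls back to the legacy vendor property, scaling nanoseconds to picoseconds. The fallback in isolation:

#include <linux/of.h>

static u32 read_rx_delay_ps(struct device_node *np)
{
	u32 ps = 0;	/* default 0ps, as most boards use */

	if (of_property_read_u32(np, "rx-internal-delay-ps", &ps)) {
		if (!of_property_read_u32(np, "amlogic,rx-delay-ns", &ps))
			ps *= 1000;	/* legacy property is in ns */
	}

	return ps;
}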
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 766e8866bbef..1850743c04da 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -366,8 +366,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_err(dev, "Failed to map rx skb buffer\n");
return -EINVAL;
@@ -375,6 +376,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -691,8 +693,9 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_kfree_skb_any(skb);
@@ -779,6 +782,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
@@ -793,7 +797,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
- dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
@@ -864,7 +868,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct device *dev,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
@@ -875,20 +878,23 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
next_desc = first_desc;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_single(dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
while (next_desc_dma) {
next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
next_desc_dma);
cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_page(dev, buf_dma, buf_dma_len,
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
@@ -906,7 +912,7 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
dev_kfree_skb_any(skb);
}
@@ -926,7 +932,7 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
ndev = skb->dev;
@@ -1119,9 +1125,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
netif_txq = netdev_get_tx_queue(ndev, q_idx);
/* Map the linear buffer */
- buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb buffer\n");
ndev->stats.tx_errors++;
goto err_free_skb;
@@ -1130,7 +1136,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (!first_desc) {
dev_dbg(dev, "Failed to allocate descriptor\n");
- dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
+ DMA_TO_DEVICE);
goto busy_stop_q;
}
@@ -1140,6 +1147,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
cppi5_hdesc_set_pkttype(first_desc, 0x7);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
*(swdata) = skb;
@@ -1175,9 +1183,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
- buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ndev->stats.tx_errors++;
@@ -1185,11 +1193,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
}
cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(next_desc,
buf_dma, frag_size, buf_dma, frag_size);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
pkt_len += frag_size;
@@ -1237,14 +1247,14 @@ done_tx:
return NETDEV_TX_OK;
err_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
busy_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
netif_tx_stop_queue(netif_txq);
return NETDEV_TX_BUSY;
@@ -1545,16 +1555,6 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->tx_chn_name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- dev_err(dev, "Failed to create poll %d\n", ret);
- goto err;
- }
tx_chn->tx_chn =
k3_udma_glue_request_tx_chn(dev,
@@ -1565,6 +1565,17 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
"Failed to request tx dma channel\n");
goto err;
}
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
+ goto err;
+ }
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq <= 0) {
@@ -1622,14 +1633,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
/* init all flows */
rx_chn->dev = dev;
rx_chn->descs_num = max_desc_num;
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
- rx_chn->descs_num,
- hdesc_size, "rx");
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- dev_err(dev, "Failed to create rx poll %d\n", ret);
- goto err;
- }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -1637,6 +1640,16 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
"Failed to request rx dma channel\n");
goto err;
}
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx poll %d\n", ret);
+ goto err;
+ }
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
@@ -2102,9 +2115,16 @@ static const struct am65_cpsw_pdata j721e_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
+static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
+ { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2164,12 +2184,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "error setting dma mask: %d\n", ret);
- return ret;
- }
-
common->ports = devm_kcalloc(dev, common->port_num,
sizeof(*common->ports),
GFP_KERNEL);
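The am65-cpsw rework maps and unmaps buffers against the DMA engine's own device (obtained via k3_udma_glue_*_get_dma_device()), which carries the real DMA constraints, and translates every bus address into the CPPI5 descriptor address space. The TX map step condensed from the hunks above, using only calls that appear in this diff:

#include <linux/dma-mapping.h>

static int map_tx_buf(struct am65_cpsw_tx_chn *tx_chn, void *buf,
		      size_t len, dma_addr_t *buf_dma)
{
	*buf_dma = dma_map_single(tx_chn->dma_dev, buf, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_chn->dma_dev, *buf_dma)))
		return -EINVAL;

	/* descriptors may use a translated (CPPI5) address space */
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, buf_dma);

	return 0;
}

The removal of dma_coerce_mask_and_coherent() on the platform device fits the same change: the DMA constraints now come from the DMA device itself.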
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 02aed4c0ceba..d7f8a0f76fdc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -56,6 +56,7 @@ struct am65_cpsw_host {
};
struct am65_cpsw_tx_chn {
+ struct device *dma_dev;
struct napi_struct napi_tx;
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
@@ -69,6 +70,7 @@ struct am65_cpsw_tx_chn {
struct am65_cpsw_rx_chn {
struct device *dev;
+ struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
u32 descs_num;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 3bdd4dbcd2ff..ebcc6386cc34 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -356,7 +356,7 @@ static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
-/**
+/*
* Enable ESTf periodic output, set cycle start time and interval.
*/
static int am65_cpsw_timer_set(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 5dc60ecabe56..9caaae79fc95 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -727,7 +727,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
/**
* am65_cpts_rx_enable - enable rx timestamping
* @cpts: cpts handle
- * @skb: packet
+ * @en: enable
*
* This functions enables rx packets timestamping. The CPTS can timestamp all
* rx packets.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b0f00b4edd94..5239318e9686 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -392,21 +392,15 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
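
Both cpsw flavours (here and in cpsw_new.c below) converge on the same two
helpers; a minimal sketch of their contract, with the arguments as used above:

	struct xdp_buff xdp;

	/* bind frame size and rxq info once per buffer ... */
	xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
	/* ... then set hard start, headroom and length; meta_valid = false
	 * replaces the old xdp_set_data_meta_invalid()
	 */
	xdp_prepare_buff(&xdp, pa, headroom, size, false);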
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index cdc308a2aa3e..d828f856237a 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1256,6 +1256,13 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.major_ver_mask = 0x7,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
+ {
+ .dev_id = "am64-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ .tbl_entries = 512,
+ },
{ },
};
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 2f5e0ad23ad7..94747f82c60b 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -335,21 +335,15 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
if (ret != CPSW_XDP_PASS)
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 29747da5c514..9967cf985728 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -24,16 +24,12 @@ struct cpsw_switchdev_event_work {
unsigned long event;
};
-static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans, u8 state)
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
{
struct cpsw_common *cpsw = priv->cpsw;
u8 cpsw_state;
int ret = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
switch (state) {
case BR_STATE_FORWARDING:
cpsw_state = ALE_PORT_STATE_FORWARD;
@@ -60,16 +56,12 @@ static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
}
static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
unsigned long brport_flags)
{
struct cpsw_common *cpsw = priv->cpsw;
bool unreg_mcast_add = false;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (brport_flags & BR_MCAST_FLOOD)
unreg_mcast_add = true;
dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
@@ -82,7 +74,6 @@ static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
}
static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
unsigned long flags)
{
if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
@@ -92,8 +83,7 @@ static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
}
static int cpsw_port_attr_set(struct net_device *ndev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
@@ -102,15 +92,15 @@ static int cpsw_port_attr_set(struct net_device *ndev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ ret = cpsw_port_attr_br_flags_pre_set(ndev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ ret = cpsw_port_stp_state_set(priv, attr->u.stp_state);
dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ ret = cpsw_port_attr_br_flags_set(priv, attr->orig_dev,
attr->u.brport_flags);
break;
default:
@@ -253,56 +243,24 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
}
static int cpsw_port_vlans_add(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
- priv->ndev->name, vlan->vid_begin, vlan->flags);
+ priv->ndev->name, vlan->vid, vlan->flags);
if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
return 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int cpsw_port_vlans_del(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan)
-
-{
- struct net_device *orig_dev = vlan->obj.orig_dev;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_del(priv, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
+ return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
static int cpsw_port_mdb_add(struct cpsw_priv *priv,
- struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ struct switchdev_obj_port_mdb *mdb)
{
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -311,9 +269,6 @@ static int cpsw_port_mdb_add(struct cpsw_priv *priv,
int port_mask;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (cpu_port)
port_mask = BIT(HOST_PORT_NUM);
else
@@ -352,7 +307,6 @@ static int cpsw_port_mdb_del(struct cpsw_priv *priv,
static int cpsw_port_obj_add(struct net_device *ndev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
@@ -365,11 +319,11 @@ static int cpsw_port_obj_add(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_add(priv, vlan, trans);
+ err = cpsw_port_vlans_add(priv, vlan);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
- err = cpsw_port_mdb_add(priv, mdb, trans);
+ err = cpsw_port_mdb_add(priv, mdb);
break;
default:
err = -EOPNOTSUPP;
@@ -392,7 +346,7 @@ static int cpsw_port_obj_del(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_del(priv, vlan);
+ err = cpsw_port_vlan_del(priv, vlan->vid, vlan->obj.orig_dev);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
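
All the switchdev handlers in this file follow the same conversion; condensed
to one hypothetical example (the real cpsw_port_stp_state_set() above also
maps bridge states to ALE states first):

	static int stp_state_set_sketch(struct cpsw_priv *priv, u8 cpsw_state)
	{
		/* the switchdev_trans argument and the
		 * switchdev_trans_ph_prepare() early return are gone:
		 * attributes are now applied in a single phase
		 */
		return cpsw_ale_control_set(priv->cpsw->ale, priv->emac_port,
					    ALE_PORT_STATE, cpsw_state);
	}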
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3d1fc8d2ca66..55e652624bd7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1100,7 +1100,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
return packets_done;
}
-/**
+/*
* gelic_card_interrupt - event handler for gelic_net
*/
static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
@@ -1400,6 +1400,7 @@ out:
/**
* gelic_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
* called, if tx hangs. Schedules a task that resets the interface
*/
@@ -1431,6 +1432,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
/**
* gelic_ether_setup_netdev_ops - initialization of net_device operations
* @netdev: net_device structure
+ * @napi: napi structure
*
* fills out function pointers in the net_device structure
*/
@@ -1632,7 +1634,7 @@ static void gelic_card_get_vlan_info(struct gelic_card *card)
dev_info(ctodev(card), "internal vlan %s\n",
card->vlan_required? "enabled" : "disabled");
}
-/**
+/*
* ps3_gelic_driver_probe - add a device to the control of this driver
*/
static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
@@ -1787,7 +1789,7 @@ fail_open:
return result;
}
-/**
+/*
* ps3_gelic_driver_remove - remove a device from the control of this driver
*/
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5f5b33e6653b..d5a75ef7e3ca 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -254,7 +254,7 @@ spider_net_set_promisc(struct spider_net_card *card)
/**
* spider_net_get_descr_status -- returns the status of a descriptor
- * @descr: descriptor to look at
+ * @hwdescr: descriptor to look at
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
@@ -542,6 +542,7 @@ error:
/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
+ * @netdev: interface device structure
* @addr: multicast address
*
* returns the hash value.
@@ -890,7 +891,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
+ * @t: timer context used to obtain the pointer to net card data structure
*
* spider_net_cleanup_tx_ring is called by either the tx_timer
* or from the NAPI polling routine.
@@ -1063,6 +1064,7 @@ static void show_rx_chain(struct spider_net_card *card)
/**
* spider_net_resync_head_ptr - Advance head ptr past empty descrs
+ * @card: card structure
*
* If the driver fails to keep up and empty the queue, then the
* hardware wil run out of room to put incoming packets. This
@@ -1220,7 +1222,7 @@ bad_desc:
/**
* spider_net_poll - NAPI poll function called by the stack to return packets
- * @netdev: interface device structure
+ * @napi: napi device structure
* @budget: number of packets we can pass to the stack at most
*
* returns 0 if no more packets available to the driver/stack. Returns 1,
@@ -1268,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
- * @ptr: pointer to new MAC address
+ * @p: pointer to new MAC address
*
* Returns 0 on success, <0 on failure. Currently, we don't support this
* and will always return EOPNOTSUPP.
@@ -1340,6 +1342,8 @@ spider_net_link_reset(struct net_device *netdev)
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
* @status_reg: interrupt status register 0 (GHIINT0STS)
+ * @error_reg1: interrupt status register 1 (GHIINT1STS)
+ * @error_reg2: interrupt status register 2 (GHIINT2STS)
*
* spider_net_handle_error_irq treats or ignores all error conditions
* found when an interrupt is presented
@@ -1961,8 +1965,7 @@ init_firmware_failed:
/**
* spider_net_link_phy
- * @data: used for pointer to card structure
- *
+ * @t: timer context used to obtain the pointer to net card data structure
*/
static void spider_net_link_phy(struct timer_list *t)
{
@@ -2140,7 +2143,7 @@ spider_net_stop(struct net_device *netdev)
/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (to be called not under interrupt status)
- * @data: data, is interface device structure
+ * @work: work context used to obtain the pointer to net card data structure
*
* called as task when tx hangs, resets interface (if interface is up)
*/
@@ -2174,6 +2177,7 @@ out:
/**
* spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
* called, if tx hangs. Schedules a task that resets the interface
*/
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 2e5202923510..0152f1e70783 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -247,7 +247,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
}
#endif
-static spinlock_t mdio_lock;
+static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
@@ -528,7 +528,6 @@ static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
- spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5523f069b9a5..4ac0373326ef 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1197,11 +1197,12 @@ static void geneve_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &geneve_type);
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
/* MTU range: 68 - (something less than 65535) */
@@ -1851,16 +1852,10 @@ static int geneve_netdevice_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
- event == NETDEV_UDP_TUNNEL_DROP_INFO) {
- geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
- } else if (event == NETDEV_UNREGISTER) {
- if (!dev->udp_tunnel_nic_info)
- geneve_offload_rx_ports(dev, false);
- } else if (event == NETDEV_REGISTER) {
- if (!dev->udp_tunnel_nic_info)
- geneve_offload_rx_ports(dev, true);
- }
+ if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
+ geneve_offload_rx_ports(dev, true);
+ else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
+ geneve_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4c04e271f184..851364314ecc 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -21,6 +21,7 @@
#include <linux/file.h>
#include <linux/gtp.h>
+#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
@@ -73,6 +74,9 @@ struct gtp_dev {
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
+ /* Used by LWT tunnel. */
+ bool collect_md;
+ struct socket *collect_md_sock;
};
static unsigned int gtp_net_id __read_mostly;
@@ -179,33 +183,121 @@ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
return false;
}
-static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
- unsigned int hdrlen, unsigned int role)
+static int gtp_set_tun_dst(struct gtp_dev *gtp, struct sk_buff *skb,
+ unsigned int hdrlen, u8 gtp_version,
+ __be64 tid, u8 flags)
{
- if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
- netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
- return 1;
+ struct metadata_dst *tun_dst;
+ int opts_len = 0;
+
+ if (unlikely(flags & GTP1_F_MASK))
+ opts_len = sizeof(struct gtpu_metadata);
+
+ tun_dst = udp_tun_rx_dst(skb, gtp->sk1u->sk_family, TUNNEL_KEY, tid, opts_len);
+ if (!tun_dst) {
+ netdev_dbg(gtp->dev, "Failed to allocate tun_dst");
+ goto err;
}
+ netdev_dbg(gtp->dev, "attaching metadata_dst to skb, gtp ver %d hdrlen %d\n",
+ gtp_version, hdrlen);
+ if (unlikely(opts_len)) {
+ struct gtpu_metadata *opts;
+ struct gtp1_header *gtp1;
+
+ opts = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+ opts->ver = GTP_METADATA_V1;
+ opts->flags = gtp1->flags;
+ opts->type = gtp1->type;
+ netdev_dbg(gtp->dev, "recved control pkt: flag %x type: %d\n",
+ opts->flags, opts->type);
+ tun_dst->u.tun_info.key.tun_flags |= TUNNEL_GTPU_OPT;
+ tun_dst->u.tun_info.options_len = opts_len;
+ skb->protocol = htons(0xffff); /* Unknown */
+ }
/* Get rid of the GTP + UDP headers. */
if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
- !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
- return -1;
+ !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)))) {
+ gtp->dev->stats.rx_length_errors++;
+ goto err;
+ }
+
+ skb_dst_set(skb, &tun_dst->dst);
+ return 0;
+err:
+ return -1;
+}
+
+static int gtp_rx(struct gtp_dev *gtp, struct sk_buff *skb,
+ unsigned int hdrlen, u8 gtp_version, unsigned int role,
+ __be64 tid, u8 flags, u8 type)
+{
+ if (ip_tunnel_collect_metadata() || gtp->collect_md) {
+ int err;
+
+ err = gtp_set_tun_dst(gtp, skb, hdrlen, gtp_version, tid, flags);
+ if (err)
+ goto err;
+ } else {
+ struct pdp_ctx *pctx;
+
+ if (flags & GTP1_F_MASK)
+ hdrlen += 4;
- netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
+ if (type != GTP_TPDU)
+ return 1;
+
+ if (gtp_version == GTP_V0)
+ pctx = gtp0_pdp_find(gtp, be64_to_cpu(tid));
+ else
+ pctx = gtp1_pdp_find(gtp, be64_to_cpu(tid));
+ if (!pctx) {
+ netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+ return 1;
+ }
+
+ if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+ netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
+ return 1;
+ }
+ /* Get rid of the GTP + UDP headers. */
+ if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+ !net_eq(sock_net(pctx->sk), dev_net(gtp->dev)))) {
+ gtp->dev->stats.rx_length_errors++;
+ goto err;
+ }
+ }
+ netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
/* Now that the UDP and the GTP header have been removed, set up the
* new network header. This is required by the upper layer to
* calculate the transport header.
*/
skb_reset_network_header(skb);
+ if (pskb_may_pull(skb, sizeof(struct iphdr))) {
+ struct iphdr *iph;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ netdev_dbg(gtp->dev, "inner pkt: ipv4");
+ skb->protocol = htons(ETH_P_IP);
+ } else if (iph->version == 6) {
+ netdev_dbg(gtp->dev, "inner pkt: ipv6");
+ skb->protocol = htons(ETH_P_IPV6);
+ } else {
+ netdev_dbg(gtp->dev, "inner pkt error: Unknown type");
+ }
+ }
- skb->dev = pctx->dev;
-
- dev_sw_netstats_rx_add(pctx->dev, skb->len);
-
+ skb->dev = gtp->dev;
+ dev_sw_netstats_rx_add(gtp->dev, skb->len);
netif_rx(skb);
return 0;
+
+err:
+ gtp->dev->stats.rx_dropped++;
+ return -1;
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
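
For the collect_md path above, a receiver of the decapsulated skb could read
the attached GTP options roughly like this (a sketch; TUNNEL_GTPU_OPT and
struct gtpu_metadata are introduced by this patch):

	struct ip_tunnel_info *info = skb_tunnel_info(skb);

	if (info && (info->key.tun_flags & TUNNEL_GTPU_OPT)) {
		struct gtpu_metadata *md = ip_tunnel_info_opts(info);

		pr_debug("gtp md: ver %u flags %#x type %u\n",
			 md->ver, md->flags, md->type);
	}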
@@ -214,7 +306,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
- struct pdp_ctx *pctx;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -224,16 +315,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp0->flags >> 5) != GTP_V0)
return 1;
- if (gtp0->type != GTP_TPDU)
- return 1;
-
- pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
- if (!pctx) {
- netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- return 1;
- }
-
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ return gtp_rx(gtp, skb, hdrlen, GTP_V0, gtp->role, gtp0->tid, gtp0->flags, gtp0->type);
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
@@ -241,41 +323,30 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
- struct pdp_ctx *pctx;
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+ netdev_dbg(gtp->dev, "GTPv1 recv: flags %x\n", gtp1->flags);
if ((gtp1->flags >> 5) != GTP_V1)
return 1;
- if (gtp1->type != GTP_TPDU)
- return 1;
-
/* From 29.060: "This field shall be present if and only if any one or
* more of the S, PN and E flags are set.".
*
* If any of the bit is set, then the remaining ones also have to be
* set.
*/
- if (gtp1->flags & GTP1_F_MASK)
- hdrlen += 4;
-
/* Make sure the header is larger enough, including extensions. */
if (!pskb_may_pull(skb, hdrlen))
return -1;
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
- pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
- if (!pctx) {
- netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- return 1;
- }
-
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ return gtp_rx(gtp, skb, hdrlen, GTP_V1, gtp->role,
+ key32_to_tunnel_id(gtp1->tid), gtp1->flags, gtp1->type);
}
static void __gtp_encap_destroy(struct sock *sk)
@@ -315,6 +386,11 @@ static void gtp_encap_disable(struct gtp_dev *gtp)
{
gtp_encap_disable_sock(gtp->sk0);
gtp_encap_disable_sock(gtp->sk1u);
+ if (gtp->collect_md_sock) {
+ udp_tunnel_sock_release(gtp->collect_md_sock);
+ gtp->collect_md_sock = NULL;
+ netdev_dbg(gtp->dev, "GTP socket released.\n");
+ }
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -329,7 +405,8 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!gtp)
return 1;
- netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+ netdev_dbg(gtp->dev, "encap_recv sk=%p type %d\n",
+ sk, udp_sk(sk)->encap_type);
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
@@ -383,12 +460,13 @@ static void gtp_dev_uninit(struct net_device *dev)
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
const struct sock *sk,
- __be32 daddr)
+ __be32 daddr,
+ __be32 saddr)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
- fl4->saddr = inet_sk(sk)->inet_saddr;
+ fl4->saddr = saddr;
fl4->flowi4_tos = RT_CONN_FLAGS(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -412,7 +490,7 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
}
-static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+static inline void gtp1_push_header(struct sk_buff *skb, __be32 tid)
{
int payload_len = skb->len;
struct gtp1_header *gtp1;
@@ -428,46 +506,63 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
gtp1->flags = 0x30; /* v1, GTP-non-prime. */
gtp1->type = GTP_TPDU;
gtp1->length = htons(payload_len);
- gtp1->tid = htonl(pctx->u.v1.o_tei);
+ gtp1->tid = tid;
/* TODO: Suppport for extension header, sequence number and N-PDU.
* Update the length field if any of them is available.
*/
}
-struct gtp_pktinfo {
- struct sock *sk;
- struct iphdr *iph;
- struct flowi4 fl4;
- struct rtable *rt;
- struct pdp_ctx *pctx;
- struct net_device *dev;
- __be16 gtph_port;
-};
-
-static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+static inline int gtp1_push_control_header(struct sk_buff *skb,
+ __be32 tid,
+ struct gtpu_metadata *opts,
+ struct net_device *dev)
{
- switch (pktinfo->pctx->gtp_version) {
- case GTP_V0:
- pktinfo->gtph_port = htons(GTP0_PORT);
- gtp0_push_header(skb, pktinfo->pctx);
- break;
- case GTP_V1:
- pktinfo->gtph_port = htons(GTP1U_PORT);
- gtp1_push_header(skb, pktinfo->pctx);
- break;
+ struct gtp1_header *gtp1c;
+ int payload_len;
+
+ if (opts->ver != GTP_METADATA_V1)
+ return -ENOENT;
+
+ if (opts->type == 0xFE) {
+ /* for end marker ignore skb data. */
+ netdev_dbg(dev, "xmit pkt with null data");
+ pskb_trim(skb, 0);
}
+ if (skb_cow_head(skb, sizeof(*gtp1c)) < 0)
+ return -ENOMEM;
+
+ payload_len = skb->len;
+
+ gtp1c = skb_push(skb, sizeof(*gtp1c));
+
+ gtp1c->flags = opts->flags;
+ gtp1c->type = opts->type;
+ gtp1c->length = htons(payload_len);
+ gtp1c->tid = tid;
+ netdev_dbg(dev, "xmit control pkt: ver %d flags %x type %x pkt len %d tid %x",
+ opts->ver, opts->flags, opts->type, skb->len, tid);
+ return 0;
}
+struct gtp_pktinfo {
+ struct sock *sk;
+ __u8 tos;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct net_device *dev;
+ __be16 gtph_port;
+};
+
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
- struct sock *sk, struct iphdr *iph,
- struct pdp_ctx *pctx, struct rtable *rt,
+ struct sock *sk,
+ __u8 tos,
+ struct rtable *rt,
struct flowi4 *fl4,
struct net_device *dev)
{
pktinfo->sk = sk;
- pktinfo->iph = iph;
- pktinfo->pctx = pctx;
+ pktinfo->tos = tos;
pktinfo->rt = rt;
pktinfo->fl4 = *fl4;
pktinfo->dev = dev;
@@ -477,40 +572,99 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
struct gtp_pktinfo *pktinfo)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct gtpu_metadata *opts = NULL;
+ struct sock *sk = NULL;
struct pdp_ctx *pctx;
struct rtable *rt;
struct flowi4 fl4;
- struct iphdr *iph;
- __be16 df;
+ u8 gtp_version;
+ __be16 df = 0;
+ __be32 tun_id;
+ __be32 daddr;
+ __be32 saddr;
+ __u8 tos;
int mtu;
- /* Read the IP destination address and resolve the PDP context.
- * Prepend PDP header with TEI/TID from PDP ctx.
- */
- iph = ip_hdr(skb);
- if (gtp->role == GTP_ROLE_SGSN)
- pctx = ipv4_pdp_find(gtp, iph->saddr);
- else
- pctx = ipv4_pdp_find(gtp, iph->daddr);
+ if (gtp->collect_md) {
+ /* LWT GTP1U encap */
+ struct ip_tunnel_info *info = NULL;
- if (!pctx) {
- netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
- &iph->daddr);
- return -ENOENT;
+ info = skb_tunnel_info(skb);
+ if (!info) {
+ netdev_dbg(dev, "missing tunnel info");
+ return -ENOENT;
+ }
+ if (info->key.tp_dst && ntohs(info->key.tp_dst) != GTP1U_PORT) {
+ netdev_dbg(dev, "unexpected GTP dst port: %d", ntohs(info->key.tp_dst));
+ return -EOPNOTSUPP;
+ }
+ pctx = NULL;
+ gtp_version = GTP_V1;
+ tun_id = tunnel_id_to_key32(info->key.tun_id);
+ daddr = info->key.u.ipv4.dst;
+ saddr = info->key.u.ipv4.src;
+ sk = gtp->sk1u;
+ if (!sk) {
+ netdev_dbg(dev, "missing tunnel sock");
+ return -EOPNOTSUPP;
+ }
+ tos = info->key.tos;
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+
+ if (info->options_len != 0) {
+ if (info->key.tun_flags & TUNNEL_GTPU_OPT) {
+ opts = ip_tunnel_info_opts(info);
+ } else {
+ netdev_dbg(dev, "missing tunnel metadata for control pkt");
+ return -EOPNOTSUPP;
+ }
+ }
+ netdev_dbg(dev, "flow-based GTP1U encap: tunnel id %d\n",
+ be32_to_cpu(tun_id));
+ } else {
+ struct iphdr *iph;
+
+ if (ntohs(skb->protocol) != ETH_P_IP)
+ return -EOPNOTSUPP;
+
+ iph = ip_hdr(skb);
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv4_pdp_find(gtp, iph->saddr);
+ else
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+ &iph->daddr);
+ return -ENOENT;
+ }
+ sk = pctx->sk;
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ gtp_version = pctx->gtp_version;
+ tun_id = htonl(pctx->u.v1.o_tei);
+ daddr = pctx->peer_addr_ip4.s_addr;
+ saddr = inet_sk(sk)->inet_saddr;
+ tos = iph->tos;
+ df = iph->frag_off;
+ netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+ &iph->saddr, &iph->daddr);
}
- netdev_dbg(dev, "found PDP context %p\n", pctx);
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, sk, daddr, saddr);
if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ netdev_dbg(dev, "no route to SSGN %pI4\n", &daddr);
dev->stats.tx_carrier_errors++;
goto err;
}
if (rt->dst.dev == dev) {
- netdev_dbg(dev, "circular route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ netdev_dbg(dev, "circular route to SSGN %pI4\n", &daddr);
dev->stats.collisions++;
goto err_rt;
}
@@ -518,11 +672,10 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_drop(skb);
/* This is similar to tnl_update_pmtu(). */
- df = iph->frag_off;
if (df) {
mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
sizeof(struct iphdr) - sizeof(struct udphdr);
- switch (pctx->gtp_version) {
+ switch (gtp_version) {
case GTP_V0:
mtu -= sizeof(struct gtp0_header);
break;
@@ -536,17 +689,38 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
- if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(iph->tot_len)) {
- netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ if (!skb_is_gso(skb) && (df & htons(IP_DF)) && mtu < skb->len) {
+ netdev_dbg(dev, "packet too big, fragmentation needed");
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
goto err_rt;
}
- gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
- gtp_push_header(skb, pktinfo);
+ gtp_set_pktinfo_ipv4(pktinfo, sk, tos, rt, &fl4, dev);
+
+ if (unlikely(opts)) {
+ int err;
+
+ pktinfo->gtph_port = htons(GTP1U_PORT);
+ err = gtp1_push_control_header(skb, tun_id, opts, dev);
+ if (err) {
+ netdev_info(dev, "cntr pkt error %d", err);
+ goto err_rt;
+ }
+ return 0;
+ }
+
+ switch (gtp_version) {
+ case GTP_V0:
+ pktinfo->gtph_port = htons(GTP0_PORT);
+ gtp0_push_header(skb, pctx);
+ break;
+ case GTP_V1:
+ pktinfo->gtph_port = htons(GTP1U_PORT);
+ gtp1_push_header(skb, tun_id);
+ break;
+ }
return 0;
err_rt:
@@ -557,7 +731,6 @@ err:
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int proto = ntohs(skb->protocol);
struct gtp_pktinfo pktinfo;
int err;
@@ -569,32 +742,22 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
rcu_read_lock();
- switch (proto) {
- case ETH_P_IP:
- err = gtp_build_skb_ip4(skb, dev, &pktinfo);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
+ err = gtp_build_skb_ip4(skb, dev, &pktinfo);
rcu_read_unlock();
if (err < 0)
goto tx_err;
- switch (proto) {
- case ETH_P_IP:
- netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
- &pktinfo.iph->saddr, &pktinfo.iph->daddr);
- udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
- pktinfo.fl4.saddr, pktinfo.fl4.daddr,
- pktinfo.iph->tos,
- ip4_dst_hoplimit(&pktinfo.rt->dst),
- 0,
- pktinfo.gtph_port, pktinfo.gtph_port,
- true, false);
- break;
- }
+ udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+ pktinfo.fl4.saddr,
+ pktinfo.fl4.daddr,
+ pktinfo.tos,
+ ip4_dst_hoplimit(&pktinfo.rt->dst),
+ 0,
+ pktinfo.gtph_port,
+ pktinfo.gtph_port,
+ true,
+ false);
return NETDEV_TX_OK;
tx_err:
@@ -610,6 +773,19 @@ static const struct net_device_ops gtp_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
};
+static struct gtp_dev *gtp_find_flow_based_dev(struct net *net)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
+
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list) {
+ if (gtp->collect_md)
+ return gtp;
+ }
+
+ return NULL;
+}
+
static void gtp_link_setup(struct net_device *dev)
{
dev->netdev_ops = &gtp_netdev_ops;
@@ -634,7 +810,7 @@ static void gtp_link_setup(struct net_device *dev)
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
+static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[]);
static void gtp_destructor(struct net_device *dev)
{
@@ -652,11 +828,24 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct gtp_net *gn;
int hashsize, err;
- if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1] &&
+ !data[IFLA_GTP_COLLECT_METADATA])
return -EINVAL;
gtp = netdev_priv(dev);
+ if (data[IFLA_GTP_COLLECT_METADATA]) {
+ if (data[IFLA_GTP_FD0]) {
+ netdev_dbg(dev, "LWT device does not support setting v0 socket");
+ return -EINVAL;
+ }
+ if (gtp_find_flow_based_dev(src_net)) {
+ netdev_dbg(dev, "LWT device already exist");
+ return -EBUSY;
+ }
+ gtp->collect_md = true;
+ }
+
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
@@ -669,7 +858,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- err = gtp_encap_enable(gtp, data);
+ err = gtp_encap_enable(gtp, dev, data);
if (err < 0)
goto out_hashtable;
@@ -683,7 +872,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
- netdev_dbg(dev, "registered new GTP interface\n");
+ netdev_dbg(dev, "registered new GTP interface %s\n", dev->name);
return 0;
@@ -714,6 +903,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
+ [IFLA_GTP_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -737,6 +927,9 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
goto nla_put_failure;
+ if (gtp->collect_md && nla_put_flag(skb, IFLA_GTP_COLLECT_METADATA))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -782,35 +975,24 @@ err1:
return -ENOMEM;
}
-static struct sock *gtp_encap_enable_socket(int fd, int type,
- struct gtp_dev *gtp)
+static int __gtp_encap_enable_socket(struct socket *sock, int type,
+ struct gtp_dev *gtp)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
- struct socket *sock;
struct sock *sk;
- int err;
-
- pr_debug("enable gtp on %d, %d\n", fd, type);
-
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- pr_debug("gtp socket fd=%d not found\n", fd);
- return NULL;
- }
sk = sock->sk;
if (sk->sk_protocol != IPPROTO_UDP ||
sk->sk_type != SOCK_DGRAM ||
(sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
- pr_debug("socket fd=%d not UDP\n", fd);
- sk = ERR_PTR(-EINVAL);
- goto out_sock;
+ pr_debug("socket not UDP\n");
+ return -EINVAL;
}
lock_sock(sk);
if (sk->sk_user_data) {
- sk = ERR_PTR(-EBUSY);
- goto out_rel_sock;
+ release_sock(sock->sk);
+ return -EBUSY;
}
sock_hold(sk);
@@ -821,15 +1003,58 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
tuncfg.encap_destroy = gtp_encap_destroy;
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
-
-out_rel_sock:
release_sock(sock->sk);
-out_sock:
+ return 0;
+}
+
+static struct sock *gtp_encap_enable_socket(int fd, int type,
+ struct gtp_dev *gtp)
+{
+ struct socket *sock;
+ int err;
+
+ pr_debug("enable gtp on %d, %d\n", fd, type);
+
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ pr_debug("gtp socket fd=%d not found\n", fd);
+ return NULL;
+ }
+ err = __gtp_encap_enable_socket(sock, type, gtp);
sockfd_put(sock);
- return sk;
+ if (err)
+ return ERR_PTR(err);
+
+ return sock->sk;
}
-static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
+static struct socket *gtp_create_gtp_socket(struct gtp_dev *gtp, struct net_device *dev)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.local_udp_port = htons(GTP1U_PORT);
+
+ err = udp_sock_create(dev_net(dev), &udp_conf, &sock);
+ if (err < 0) {
+ pr_debug("create gtp sock failed: %d\n", err);
+ return ERR_PTR(err);
+ }
+ err = __gtp_encap_enable_socket(sock, UDP_ENCAP_GTP1U, gtp);
+ if (err) {
+ pr_debug("enable gtp sock encap failed: %d\n", err);
+ udp_tunnel_sock_release(sock);
+ return ERR_PTR(err);
+ }
+ pr_debug("create gtp sock done\n");
+ return sock;
+}
+
+static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
@@ -853,11 +1078,25 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
}
}
+ if (data[IFLA_GTP_COLLECT_METADATA]) {
+ struct socket *sock;
+
+ if (!sk1u) {
+ sock = gtp_create_gtp_socket(gtp, dev);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ gtp->collect_md_sock = sock;
+ sk1u = sock->sk;
+ } else {
+ gtp->collect_md_sock = NULL;
+ }
+ }
+
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN) {
- gtp_encap_disable_sock(sk0);
- gtp_encap_disable_sock(sk1u);
+ gtp_encap_disable(gtp);
return -EINVAL;
}
}
@@ -1416,7 +1655,7 @@ static int __init gtp_init(void)
if (err < 0)
goto unreg_genl_family;
- pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+ pr_info("GTP module loaded (pdp ctx size %zd bytes) with tnl-md support\n",
sizeof(struct pdp_ctx));
return 0;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2350342b961f..6184e99c7f31 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -37,6 +37,10 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+ /* Block sending traffic to VF if it's about to be gone */
+ if (!vf)
+ net_device_ctx->data_path_is_vf = vf;
+
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
if (vf)
@@ -50,8 +54,11 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
- VMBUS_RQST_ID_NO_RESPONSE,
- VM_PKT_DATA_INBAND, 0);
+ (unsigned long)init_pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ wait_for_completion(&nv_dev->channel_init_wait);
+ net_device_ctx->data_path_is_vf = vf;
}
/* Worker to setup sub channels on initial setup
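
The point of the new completion handshake is ordering; spelled out as a sketch
(send_and_wait() is a hypothetical stand-in for the vmbus_sendpacket() +
wait_for_completion() pair above):

	static void switch_datapath_sketch(struct net_device_context *ctx,
					   bool vf)
	{
		if (!vf)
			ctx->data_path_is_vf = false;	/* stop TX to the VF
							 * before asking the
							 * host to drop it
							 */
		send_and_wait(ctx, vf);		/* hypothetical helper */

		ctx->data_path_is_vf = vf;	/* commit after the host ack */
	}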
@@ -754,8 +761,31 @@ static void netvsc_send_completion(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
- const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
+ const struct nvsp_message *nvsp_packet;
u32 msglen = hv_pkt_datalen(desc);
+ struct nvsp_message *pkt_rqst;
+ u64 cmd_rqst;
+
+ /* First check if this is a VMBUS completion without data payload */
+ if (!msglen) {
+ cmd_rqst = vmbus_request_addr(&incoming_channel->requestor,
+ (u64)desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction id\n");
+ return;
+ }
+
+ pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
+ switch (pkt_rqst->hdr.msg_type) {
+ case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
+ complete(&net_device->channel_init_wait);
+ break;
+
+ default:
+ netdev_err(ndev, "Unexpected VMBUS completion!!\n");
+ }
+ return;
+ }
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
@@ -763,6 +793,7 @@ static void netvsc_send_completion(struct net_device *ndev,
return;
}
+ nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
@@ -887,6 +918,7 @@ static inline int netvsc_send_pkt(
int ret;
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
+ memset(&nvmsg, 0, sizeof(struct nvsp_message));
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)
rpkt->channel_type = 0; /* 0 is RMC_DATA */
@@ -1306,7 +1338,7 @@ static void netvsc_send_table(struct net_device *ndev,
sizeof(union nvsp_6_message_uber);
/* Boundary check for all versions */
- if (offset > msglen - count * sizeof(u32)) {
+ if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
netdev_err(ndev, "Received send-table offset too big:%u\n",
offset);
return;
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 440486d9c999..aa877da113f8 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -37,6 +37,12 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
if (!prog)
goto out;
+ /* Ensure that the below memcpy() won't overflow the page buffer. */
+ if (len > ndev->mtu + ETH_HLEN) {
+ act = XDP_DROP;
+ goto out;
+ }
+
/* allocate page buffer for data */
page = alloc_page(GFP_ATOMIC);
if (!page) {
@@ -44,12 +50,8 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
goto out;
}
- xdp->data_hard_start = page_address(page);
- xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &nvchan->xdp_rxq;
- xdp->frame_sz = PAGE_SIZE;
+ xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
memcpy(xdp->data, data, len);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f32f28311d57..ac20c432d4d8 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -539,7 +539,8 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
- netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
+ net_device_ctx->data_path_is_vf)
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will atmost need two pages to describe the rndis
@@ -760,6 +761,16 @@ void netvsc_linkstatus_callback(struct net_device *net,
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
+ /* Validate status_buf_offset */
+ if (indicate->status_buflen < sizeof(speed) ||
+ indicate->status_buf_offset < sizeof(*indicate) ||
+ resp->msg_len - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
+ resp->msg_len - RNDIS_HEADER_SIZE - indicate->status_buf_offset
+ < indicate->status_buflen) {
+ netdev_err(net, "invalid rndis_indicate_status packet\n");
+ return;
+ }
+
speed = *(u32 *)((void *)indicate
+ indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed;
@@ -865,8 +876,14 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
*/
if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
csum_info->receive.ip_checksum_succeeded &&
- skb->protocol == htons(ETH_P_IP))
+ skb->protocol == htons(ETH_P_IP)) {
+ /* Check that there is enough space to hold the IP header. */
+ if (skb_headlen(skb) < sizeof(struct iphdr)) {
+ kfree_skb(skb);
+ return NULL;
+ }
netvsc_comp_ipcsum(skb);
+ }
/* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
@@ -2381,12 +2398,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
* interface, there is only the CHANGE event and no UP or DOWN event.
*/
-static int netvsc_vf_changed(struct net_device *vf_netdev)
+static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
- bool vf_is_up = netif_running(vf_netdev);
+ bool vf_is_up = false;
+
+ if (event != NETDEV_GOING_DOWN)
+ vf_is_up = netif_running(vf_netdev);
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
@@ -2399,7 +2419,6 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
- net_device_ctx->data_path_is_vf = vf_is_up;
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
@@ -2716,7 +2735,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
- return netvsc_vf_changed(event_dev);
+ case NETDEV_GOING_DOWN:
+ return netvsc_vf_changed(event_dev, event);
default:
return NOTIFY_DONE;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 598713c0d5a8..c8534b6619b8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -131,66 +131,84 @@ static void dump_rndis_message(struct net_device *netdev,
{
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
- "data offset %u data len %u, # oob %u, "
- "oob offset %u, oob len %u, pkt offset %u, "
- "pkt len %u\n",
- rndis_msg->msg_len,
- rndis_msg->msg.pkt.data_offset,
- rndis_msg->msg.pkt.data_len,
- rndis_msg->msg.pkt.num_oob_data_elements,
- rndis_msg->msg.pkt.oob_data_offset,
- rndis_msg->msg.pkt.oob_data_len,
- rndis_msg->msg.pkt.per_pkt_info_offset,
- rndis_msg->msg.pkt.per_pkt_info_len);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
+ const struct rndis_packet *pkt = &rndis_msg->msg.pkt;
+ netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
+ "data offset %u data len %u, # oob %u, "
+ "oob offset %u, oob len %u, pkt offset %u, "
+ "pkt len %u\n",
+ rndis_msg->msg_len,
+ pkt->data_offset,
+ pkt->data_len,
+ pkt->num_oob_data_elements,
+ pkt->oob_data_offset,
+ pkt->oob_data_len,
+ pkt->per_pkt_info_offset,
+ pkt->per_pkt_info_len);
+ }
break;
case RNDIS_MSG_INIT_C:
- netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
- "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
- "device flags %d, max xfer size 0x%x, max pkts %u, "
- "pkt aligned %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.init_complete.req_id,
- rndis_msg->msg.init_complete.status,
- rndis_msg->msg.init_complete.major_ver,
- rndis_msg->msg.init_complete.minor_ver,
- rndis_msg->msg.init_complete.dev_flags,
- rndis_msg->msg.init_complete.max_xfer_size,
- rndis_msg->msg.init_complete.
- max_pkt_per_msg,
- rndis_msg->msg.init_complete.
- pkt_alignment_factor);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_initialize_complete)) {
+ const struct rndis_initialize_complete *init_complete =
+ &rndis_msg->msg.init_complete;
+ netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
+ "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
+ "device flags %d, max xfer size 0x%x, max pkts %u, "
+ "pkt aligned %u)\n",
+ rndis_msg->msg_len,
+ init_complete->req_id,
+ init_complete->status,
+ init_complete->major_ver,
+ init_complete->minor_ver,
+ init_complete->dev_flags,
+ init_complete->max_xfer_size,
+ init_complete->max_pkt_per_msg,
+ init_complete->pkt_alignment_factor);
+ }
break;
case RNDIS_MSG_QUERY_C:
- netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
- "(len %u, id 0x%x, status 0x%x, buf len %u, "
- "buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.query_complete.req_id,
- rndis_msg->msg.query_complete.status,
- rndis_msg->msg.query_complete.
- info_buflen,
- rndis_msg->msg.query_complete.
- info_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_query_complete)) {
+ const struct rndis_query_complete *query_complete =
+ &rndis_msg->msg.query_complete;
+ netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
+ "(len %u, id 0x%x, status 0x%x, buf len %u, "
+ "buf offset %u)\n",
+ rndis_msg->msg_len,
+ query_complete->req_id,
+ query_complete->status,
+ query_complete->info_buflen,
+ query_complete->info_buf_offset);
+ }
break;
case RNDIS_MSG_SET_C:
- netdev_dbg(netdev,
- "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.set_complete.req_id,
- rndis_msg->msg.set_complete.status);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE + sizeof(struct rndis_set_complete)) {
+ const struct rndis_set_complete *set_complete =
+ &rndis_msg->msg.set_complete;
+ netdev_dbg(netdev,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->msg_len,
+ set_complete->req_id,
+ set_complete->status);
+ }
break;
case RNDIS_MSG_INDICATE:
- netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
- "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.indicate_status.status,
- rndis_msg->msg.indicate_status.status_buflen,
- rndis_msg->msg.indicate_status.status_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_indicate_status)) {
+ const struct rndis_indicate_status *indicate_status =
+ &rndis_msg->msg.indicate_status;
+ netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
+ "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
+ rndis_msg->msg_len,
+ indicate_status->status,
+ indicate_status->status_buflen,
+ indicate_status->status_buf_offset);
+ }
break;
default:
@@ -246,11 +264,20 @@ static void rndis_set_link_state(struct rndis_device *rdev,
{
u32 link_status;
struct rndis_query_complete *query_complete;
+ u32 msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
+ return;
query_complete = &request->response_msg.msg.query_complete;
if (query_complete->status == RNDIS_STATUS_SUCCESS &&
- query_complete->info_buflen == sizeof(u32)) {
+ query_complete->info_buflen >= sizeof(u32) &&
+ query_complete->info_buf_offset >= sizeof(*query_complete) &&
+ msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ >= query_complete->info_buflen) {
memcpy(&link_status, (void *)((unsigned long)query_complete +
query_complete->info_buf_offset), sizeof(u32));
rdev->link_state = link_status != 0;
@@ -343,7 +370,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
*/
static inline void *rndis_get_ppi(struct net_device *ndev,
struct rndis_packet *rpkt,
- u32 rpkt_len, u32 type, u8 internal)
+ u32 rpkt_len, u32 type, u8 internal,
+ u32 ppi_size)
{
struct rndis_per_packet_info *ppi;
int len;
@@ -359,7 +387,8 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
return NULL;
}
- if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
+ if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
+ rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
rpkt->per_pkt_info_len);
return NULL;
@@ -381,8 +410,15 @@ static inline void *rndis_get_ppi(struct net_device *ndev,
continue;
}
- if (ppi->type == type && ppi->internal == internal)
+ if (ppi->type == type && ppi->internal == internal) {
+ /* ppi->size should be big enough to hold the returned object. */
+ if (ppi->size - ppi->ppi_offset < ppi_size) {
+ netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
+ ppi->size, ppi->ppi_offset);
+ continue;
+ }
return (void *)((ulong)ppi + ppi->ppi_offset);
+ }
len -= ppi->size;
ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
}
@@ -461,13 +497,16 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
- vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
+ vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan));
- csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
+ csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
+ sizeof(*csum_info));
- hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
+ hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
+ sizeof(*hash_info));
- pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
+ pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
+ sizeof(*pktinfo_id));
data = (void *)msg + data_offset;
@@ -522,9 +561,6 @@ int rndis_filter_receive(struct net_device *ndev,
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct rndis_message *rndis_msg = data;
- if (netif_msg_rx_status(net_device_ctx))
- dump_rndis_message(ndev, rndis_msg);
-
/* Validate incoming rndis_message packet */
if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
buflen < rndis_msg->msg_len) {
@@ -533,6 +569,9 @@ int rndis_filter_receive(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
+ if (netif_msg_rx_status(net_device_ctx))
+ dump_rndis_message(ndev, rndis_msg);
+
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
return rndis_filter_receive_data(ndev, net_dev, nvchan,
@@ -567,6 +606,7 @@ static int rndis_filter_query_device(struct rndis_device *dev,
u32 inresult_size = *result_size;
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
+ u32 msg_len;
int ret = 0;
if (!result)
@@ -634,8 +674,19 @@ static int rndis_filter_query_device(struct rndis_device *dev,
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
+ msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
+ ret = -1;
+ goto cleanup;
+ }
- if (query_complete->info_buflen > inresult_size) {
+ if (query_complete->info_buflen > inresult_size ||
+ query_complete->info_buf_offset < sizeof(*query_complete) ||
+ msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ < query_complete->info_buflen) {
ret = -1;
goto cleanup;
}
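
The same buffer validation recurs in netvsc_linkstatus_callback(),
rndis_set_link_state() and rndis_filter_query_device(); written as one
hypothetical helper (not in the patch), the invariant being enforced is:

	static bool rndis_buf_ok(u32 msg_len, u32 buf_off, u32 buf_len,
				 u32 min_len)
	{
		/* written as additions to avoid unsigned underflow */
		if (msg_len < RNDIS_HEADER_SIZE + min_len)
			return false;
		if (buf_off < min_len)	/* buffer must follow the struct */
			return false;
		return msg_len - RNDIS_HEADER_SIZE >= buf_off &&
		       msg_len - RNDIS_HEADER_SIZE - buf_off >= buf_len;
	}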
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 9f0d2a93379c..b68f1289b89e 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -1,9 +1,10 @@
config QCOM_IPA
tristate "Qualcomm IPA support"
- depends on ARCH_QCOM && 64BIT && NET
- depends on QCOM_Q6V5_MSS
+ depends on 64BIT && NET && QCOM_SMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_QMI_HELPERS
- select QCOM_MDT_LOADER
help
Choose Y or M here to include support for the Qualcomm
IP Accelerator (IPA), a hardware block present in some
@@ -11,7 +12,8 @@ config QCOM_IPA
that is capable of generic hardware handling of IP packets,
including routing, filtering, and NAT. Currently the IPA
driver supports only basic transport of network traffic
- between the AP and modem, on the Qualcomm SDM845 SoC.
+ between the AP and modem, on the Qualcomm SDM845 and SC7180
+ SoCs.
Note that if selected, the selection type must match that
of QCOM_Q6V5_COMMON (Y or M).
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 14d9a791924b..5b29f7d9d6ac 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -89,9 +89,9 @@
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
-#define GSI_CMD_TIMEOUT 5 /* seconds */
+#define GSI_CMD_TIMEOUT 50 /* milliseconds */
-#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
@@ -220,6 +220,58 @@ static void gsi_irq_teardown(struct gsi *gsi)
/* Nothing to do */
}
+/* Event ring commands are performed one at a time. Their completion
+ * is signaled by the event ring control GSI interrupt type, which is
+ * only enabled when we issue an event ring command. Only the event
+ * ring being operated on has this interrupt enabled.
+ */
+static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val = BIT(evt_ring_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+}
+
+/* Disable event ring control interrupts */
+static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+}
+
+/* Channel commands are performed one at a time. Their completion is
+ * signaled by the channel control GSI interrupt type, which is only
+ * enabled when we issue a channel command. Only the channel being
+ * operated on has this interrupt enabled.
+ */
+static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
+{
+ u32 val = BIT(channel_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+}
+
+/* Disable channel control interrupts */
+static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+}
+
static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
@@ -307,11 +359,13 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
+ unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+
reinit_completion(completion);
iowrite32(val, gsi->virt + reg);
- return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+ return !!wait_for_completion_timeout(completion, timeout);
}
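A note on the timeout conversion above: wait_for_completion_timeout() takes jiffies, so a millisecond constant must go through msecs_to_jiffies() rather than being scaled by HZ by hand. A minimal sketch of the pattern (the wait_done() helper is hypothetical, not part of the driver):

#include <linux/jiffies.h>
#include <linux/completion.h>

/* Sketch: wait up to timeout_ms milliseconds for @done to complete.
 * msecs_to_jiffies() handles the HZ scaling, so call sites can keep
 * their timeout constants in human-readable milliseconds.
 */
static bool wait_done(struct completion *done, unsigned int timeout_ms)
{
	unsigned long timeout = msecs_to_jiffies(timeout_ms);

	return wait_for_completion_timeout(done, timeout) != 0;
}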
/* Return the hardware's notion of the current state of an event ring */
@@ -326,41 +380,26 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one event ring command at a time, and event
- * control interrupts should only occur when such a command
- * is issued here. Only permit *this* event ring to trigger
- * an interrupt, and only enable the event control IRQ type
- * when we expect it to occur.
- *
- * There's a small chance that a previous command completed
- * after the interrupt was disabled, so make sure we have no
- * pending interrupts before we enable them.
- */
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
-
- val = BIT(evt_ring_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ev_ctrl_disable(gsi);
- if (success)
+ if (!timeout)
return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
@@ -380,7 +419,7 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return -EINVAL;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
/* If successful the event ring state will have changed */
if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
@@ -405,7 +444,7 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
/* If successful the event ring state will have changed */
if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
@@ -426,7 +465,7 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
/* If successful the event ring state will have changed */
if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
@@ -456,34 +495,19 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one channel command at a time, and channel
- * control interrupts should only occur when such a command is
- * issued here. So we only permit *this* channel to trigger
- * an interrupt and only enable the channel control IRQ type
- * when we expect it to occur.
- *
- * There's a small chance that a previous command completed
- * after the interrupt was disabled, so make sure we have no
- * pending interrupts before we enable them.
- */
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
-
- val = BIT(channel_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ch_ctrl_enable(gsi, channel_id);
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ch_ctrl_disable(gsi);
- if (success)
+ if (!timeout)
return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
@@ -589,7 +613,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- msleep(1); /* A short delay is required before a RESET command */
+ /* A short delay is required before a RESET command */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
@@ -864,21 +889,18 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- u32 retries;
+ u32 retries = GSI_CHANNEL_STOP_RETRIES;
int ret;
gsi_channel_freeze(channel);
- /* RX channels might require a little time to enter STOPPED state */
- retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
-
mutex_lock(&gsi->mutex);
do {
ret = gsi_channel_stop_command(channel);
if (ret != -EAGAIN)
break;
- msleep(1);
+ usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
} while (retries--);
mutex_unlock(&gsi->mutex);
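The msleep(1)-to-usleep_range() conversions in this file follow the kernel's timer documentation: msleep() rounds up to jiffy granularity and can oversleep a 1 ms request by an entire tick or more (10-20 ms at HZ=100), while usleep_range() is hrtimer-based and honors short windows. A sketch of the idiom (hypothetical helper):

#include <linux/delay.h>
#include <linux/time64.h>

/* Sketch: a bounded 1-2 ms settle delay. The min/max range gives the
 * scheduler freedom to coalesce wakeups while still guaranteeing an
 * upper bound, which msleep(1) cannot do at low HZ.
 */
static void short_settle_delay(void)
{
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
}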
@@ -1627,7 +1649,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
- bool success;
+ bool timeout;
u32 val;
/* The error global interrupt type is always enabled (until we
@@ -1650,12 +1672,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- if (success)
+ if (!timeout)
return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 4d4606b5fa95..3a4ab8a94d82 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -13,6 +13,7 @@
#include "ipa_cmd.h"
+struct page;
struct scatterlist;
struct device;
struct sk_buff;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 135c393437f1..354675a643db 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -31,142 +31,154 @@
*/
/**
+ * struct ipa_interconnect - IPA interconnect information
+ * @path: Interconnect path
+ * @average_bandwidth: Average interconnect bandwidth (KB/second)
+ * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
+ */
+struct ipa_interconnect {
+ struct icc_path *path;
+ u32 average_bandwidth;
+ u32 peak_bandwidth;
+};
+
+/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @core: IPA core clock
- * @memory_path: Memory interconnect
- * @imem_path: Internal memory interconnect
- * @config_path: Configuration space interconnect
- * @interconnect_data: Interconnect configuration data
+ * @interconnect_count: Number of elements in interconnect[]
+ * @interconnect: Interconnect array
*/
struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
- struct icc_path *memory_path;
- struct icc_path *imem_path;
- struct icc_path *config_path;
- const struct ipa_interconnect_data *interconnect_data;
+ u32 interconnect_count;
+ struct ipa_interconnect *interconnect;
};
-static struct icc_path *
-ipa_interconnect_init_one(struct device *dev, const char *name)
+static int ipa_interconnect_init_one(struct device *dev,
+ struct ipa_interconnect *interconnect,
+ const struct ipa_interconnect_data *data)
{
struct icc_path *path;
- path = of_icc_get(dev, name);
- if (IS_ERR(path))
- dev_err(dev, "error %ld getting %s interconnect\n",
- PTR_ERR(path), name);
+ path = of_icc_get(dev, data->name);
+ if (IS_ERR(path)) {
+ int ret = PTR_ERR(path);
+
+ dev_err(dev, "error %d getting %s interconnect\n", ret,
+ data->name);
- return path;
+ return ret;
+ }
+
+ interconnect->path = path;
+ interconnect->average_bandwidth = data->average_bandwidth;
+ interconnect->peak_bandwidth = data->peak_bandwidth;
+
+ return 0;
}
-/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
- struct icc_path *path;
-
- path = ipa_interconnect_init_one(dev, "memory");
- if (IS_ERR(path))
- goto err_return;
- clock->memory_path = path;
+ icc_put(interconnect->path);
+ memset(interconnect, 0, sizeof(*interconnect));
+}
- path = ipa_interconnect_init_one(dev, "imem");
- if (IS_ERR(path))
- goto err_memory_path_put;
- clock->imem_path = path;
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
+ const struct ipa_interconnect_data *data)
+{
+ struct ipa_interconnect *interconnect;
+ u32 count;
+ int ret;
- path = ipa_interconnect_init_one(dev, "config");
- if (IS_ERR(path))
- goto err_imem_path_put;
- clock->config_path = path;
+ count = clock->interconnect_count;
+ interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
+ if (!interconnect)
+ return -ENOMEM;
+ clock->interconnect = interconnect;
+
+ while (count--) {
+ ret = ipa_interconnect_init_one(dev, interconnect, data++);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_put:
- icc_put(clock->imem_path);
-err_memory_path_put:
- icc_put(clock->memory_path);
-err_return:
- return PTR_ERR(path);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
+
+ return ret;
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
- icc_put(clock->config_path);
- icc_put(clock->imem_path);
- icc_put(clock->memory_path);
+ struct ipa_interconnect *interconnect;
+
+ interconnect = clock->interconnect + clock->interconnect_count;
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
int ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- return ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_memory_path_disable;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->config_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_imem_path_disable;
+ u32 i;
+
+ interconnect = clock->interconnect;
+ for (i = 0; i < clock->interconnect_count; i++) {
+ ret = icc_set_bw(interconnect->path,
+ interconnect->average_bandwidth,
+ interconnect->peak_bandwidth);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_disable:
- (void)icc_set_bw(clock->imem_path, 0, 0);
-err_memory_path_disable:
- (void)icc_set_bw(clock->memory_path, 0, 0);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ (void)icc_set_bw(interconnect->path, 0, 0);
return ret;
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+static void ipa_interconnect_disable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ int result = 0;
+ u32 count;
int ret;
- ret = icc_set_bw(clock->memory_path, 0, 0);
- if (ret)
- return ret;
-
- ret = icc_set_bw(clock->imem_path, 0, 0);
- if (ret)
- goto err_memory_path_reenable;
-
- ret = icc_set_bw(clock->config_path, 0, 0);
- if (ret)
- goto err_imem_path_reenable;
-
- return 0;
-
-err_imem_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- (void)icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
-err_memory_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- (void)icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
+ count = clock->interconnect_count;
+ interconnect = clock->interconnect + count;
+ while (count--) {
+ interconnect--;
+ ret = icc_set_bw(interconnect->path, 0, 0);
+ if (ret && !result)
+ result = ret;
+ }
- return ret;
+ if (result)
+ dev_err(&ipa->pdev->dev,
+ "error %d disabling IPA interconnects\n", result);
}
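The rewritten init, enable, and exit paths above share one unwind idiom worth calling out: walk a pointer forward over the array, and on error walk the same pointer back toward the base, undoing only the entries that succeeded. A standalone sketch of that pattern (struct res, setup_one() and teardown_one() are hypothetical placeholders):

/* Sketch of the unwind pattern used by ipa_interconnect_init() and
 * ipa_interconnect_enable(): on failure, "res" points one past the
 * last element that was successfully set up, so "while (res-- > base)"
 * visits exactly those elements, in reverse order.
 */
static int setup_all(struct res *base, u32 count)
{
	struct res *res = base;
	int ret;

	while (count--) {
		ret = setup_one(res);
		if (ret)
			goto out_unwind;
		res++;
	}

	return 0;

out_unwind:
	while (res-- > base)
		teardown_one(res);

	return ret;
}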
/* Turn on IPA clocks, including interconnects */
@@ -189,7 +201,7 @@ static int ipa_clock_enable(struct ipa *ipa)
static void ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- (void)ipa_interconnect_disable(ipa);
+ ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
@@ -286,9 +298,9 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
goto err_clk_put;
}
clock->core = clk;
- clock->interconnect_data = data->interconnect;
+ clock->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(clock, dev);
+ ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
if (ret)
goto err_kfree;
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 5cc0ed77edb9..997b51ceb7d7 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -309,24 +309,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.570 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 465000, /* 465 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 68570, /* 68.570 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 30000, /* 30 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SC7180 SoC. */
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index f8fee8d3ca42..88c9c3562ab7 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -329,24 +329,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 350000, /* 350 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 40000, /* 40 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 600000, /* 600 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 350000, /* 350 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 40000, /* 40 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SDM845 SoC. */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 0ed5ffe2b8da..b476fc373f7f 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -258,32 +258,28 @@ struct ipa_mem_data {
u32 smem_size;
};
-/** enum ipa_interconnect_id - IPA interconnect identifier */
-enum ipa_interconnect_id {
- IPA_INTERCONNECT_MEMORY,
- IPA_INTERCONNECT_IMEM,
- IPA_INTERCONNECT_CONFIG,
- IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
-};
-
/**
- * struct ipa_interconnect_data - description of IPA interconnect rates
- * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
- * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ * struct ipa_interconnect_data - description of IPA interconnect bandwidths
+ * @name: Interconnect name (matches interconnect-name in DT)
+ * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units)
*/
struct ipa_interconnect_data {
- u32 peak_rate;
- u32 average_rate;
+ const char *name;
+ u32 peak_bandwidth;
+ u32 average_bandwidth;
};
/**
* struct ipa_clock_data - description of IPA clock and interconnect rates
* @core_clock_rate: Core clock rate (Hz)
- * @interconnect: Array of interconnect bandwidth parameters
+ * @interconnect_count: Number of entries in the interconnect_data array
+ * @interconnect_data: IPA interconnect configuration data
*/
struct ipa_clock_data {
u32 core_clock_rate;
- struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+ u32 interconnect_count; /* # entries in interconnect_data[] */
+ const struct ipa_interconnect_data *interconnect_data;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f4be9812a1f..688a3dd40510 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1378,7 +1378,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
do {
if (!ipa_endpoint_aggr_active(endpoint))
break;
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
/* Check one last time */
@@ -1399,7 +1399,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
goto out_suspend_again;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fb51329f8964..9a9a5cf36a4b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1385,7 +1385,7 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
return ret;
}
- if (!data || !data[IFLA_MACVLAN_MACADDR_DATA])
+ if (!data[IFLA_MACVLAN_MACADDR_DATA])
return 0;
head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index fa41d8c42f05..5f3a4cc92a88 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -122,7 +122,7 @@ static const struct net_device_ops mhi_netdev_ops = {
static void mhi_net_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
- ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->type = ARPHRD_RAWIP;
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -158,7 +158,18 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- skb->protocol = htons(ETH_P_MAP);
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
skb_put(skb, mhi_res->bytes_xferd);
netif_rx(skb);
}
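With the device type switched to ARPHRD_RAWIP there is no link-layer header to carry an EtherType, so the receive path classifies packets by the IP version nibble in the first payload byte, exactly as the hunk above does. A self-contained sketch of that dispatch (the rawip_protocol() helper is hypothetical):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Sketch: 0x4x means IPv4, 0x6x means IPv6; anything else is treated
 * as QMAP-encapsulated traffic, matching the default case above.
 */
static __be16 rawip_protocol(const struct sk_buff *skb)
{
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		return htons(ETH_P_IP);
	case 0x60:
		return htons(ETH_P_IPV6);
	default:
		return htons(ETH_P_MAP);
	}
}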
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 7178468302c8..aec92440eef1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -258,8 +258,6 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_devlink_port = nsim_get_devlink_port,
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index d0b36fd6c265..d67bddc111e3 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -132,6 +132,11 @@
#define AT803X_MIN_DOWNSHIFT 2
#define AT803X_MAX_DOWNSHIFT 9
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
@@ -146,8 +151,11 @@ MODULE_LICENSE("GPL");
struct at803x_priv {
int flags;
#define AT803X_KEEP_PLL_ENABLED BIT(0) /* don't turn off internal PLL */
+#define AT803X_DISABLE_SMARTEEE BIT(1)
u16 clk_25m_reg;
u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
@@ -411,13 +419,32 @@ static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
struct at803x_priv *priv = phydev->priv;
- u32 freq, strength;
+ u32 freq, strength, tw;
unsigned int sel;
int ret;
if (!IS_ENABLED(CONFIG_OF_MDIO))
return 0;
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
if (!ret) {
switch (freq) {
@@ -526,22 +553,47 @@ static void at803x_remove(struct phy_device *phydev)
regulator_disable(priv->vddio);
}
-static int at803x_clk_out_config(struct phy_device *phydev)
+static int at803x_smarteee_config(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
- int val;
+ u16 mask = 0, val = 0;
+ int ret;
- if (!priv->clk_25m_mask)
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
return 0;
- val = phy_read_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M);
- if (val < 0)
- return val;
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
- val &= ~priv->clk_25m_mask;
- val |= priv->clk_25m_reg;
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
- return phy_write_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M, val);
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
}
static int at8031_pll_config(struct phy_device *phydev)
@@ -584,6 +636,10 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
ret = at803x_clk_out_config(phydev);
if (ret < 0)
return ret;
@@ -594,7 +650,13 @@ static int at803x_config_init(struct phy_device *phydev)
return ret;
}
- return 0;
+ /* Ar803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
}
static int at803x_ack_interrupt(struct phy_device *phydev)
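The SmartEEE change above packs both LPI wake timers into one 16-bit register: the 1G timer in the high byte of AT803X_MMD3_SMARTEEE_CTL1 and the 100M timer in the low byte, building mask and value together so an unconfigured timer leaves its byte untouched. A standalone sketch of that packing (the build_lpi_tw() helper is hypothetical):

#include <linux/types.h>

/* Sketch: derive the phy_modify_mmd() mask/value pair from the two
 * optional timer settings. A zero timer means "not configured in DT",
 * so its byte is excluded from the mask and the hardware default stays.
 */
static void build_lpi_tw(u8 tw_1g, u8 tw_100m, u16 *mask, u16 *val)
{
	*mask = 0;
	*val = 0;

	if (tw_1g) {
		*mask |= 0xff00;
		*val |= (u16)tw_1g << 8;
	}
	if (tw_100m) {
		*mask |= 0x00ff;
		*val |= tw_100m;
	}
}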
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 15812001b3ff..e79297a4bae8 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -612,6 +612,7 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -633,6 +634,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
+ { PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2b42e46066b4..040509b81f02 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -740,7 +740,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+ lockdep_assert_held_once(&bus->mdio_lock);
retval = bus->read(bus, addr, regnum);
@@ -766,7 +766,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+ lockdep_assert_held_once(&bus->mdio_lock);
err = bus->write(bus, addr, regnum, val);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 54e0d75203da..39c7c786a912 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1389,7 +1389,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
- .name = "Micrel KSZ886X Switch",
+ .name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 5a8c8eb18582..46160baaafe3 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -19,8 +19,6 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
-#define DEBUG
-
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 45f75533c47c..9cb7e4dbf8f4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -724,7 +724,7 @@ static int phy_check_link_status(struct phy_device *phydev)
{
int err;
- WARN_ON(!mutex_is_locked(&phydev->lock));
+ lockdep_assert_held(&phydev->lock);
/* Keep previous state if loopback is enabled because some PHYs
* report that Link is Down when loopback is enabled.
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 80c2e646c093..8447e56ba572 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1740,7 +1740,7 @@ int __phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = phydev->drv;
int ret;
- WARN_ON(!mutex_is_locked(&phydev->lock));
+ lockdep_assert_held(&phydev->lock);
if (!phydrv || !phydrv->resume)
return 0;
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 20b91f5dfc6e..3c67ad9951ab 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -265,6 +265,12 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
+ /* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */
+ if (id->base.e100_base_fx || id->base.e100_base_lx)
+ phylink_set(modes, 100baseFX_Full);
+ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100)
+ phylink_set(modes, 100baseFX_Full);
+
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
*/
@@ -337,11 +343,16 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* the bitrate to determine supported modes. Some BiDi modules (eg,
* 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing
* wavelengths, so do not set any transceiver bits.
+ *
+ * Do the same for modules supporting 2500BASE-X. Note that some
+ * modules use 2500Mbaud rather than 3100 or 3200Mbaud for
+ * 2500BASE-X, so we allow some slack here.
*/
- if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
- /* If the bit rate allows 1000baseX */
- if (br_nom && br_min <= 1300 && br_max >= 1200)
+ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
+ if (br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
+ if (br_min <= 3200 && br_max >= 2500)
+ phylink_set(modes, 2500baseX_Full);
}
if (bus->sfp_quirk)
@@ -384,6 +395,9 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
if (phylink_test(link_modes, 1000baseX_Full))
return PHY_INTERFACE_MODE_1000BASEX;
+ if (phylink_test(link_modes, 100baseFX_Full))
+ return PHY_INTERFACE_MODE_100BASEX;
+
dev_warn(bus->sfp_dev, "Unable to ascertain link mode\n");
return PHY_INTERFACE_MODE_NA;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 91d74c1a920a..b2a5ed6915fa 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/hwmon.h>
@@ -258,6 +259,9 @@ struct sfp {
char *hwmon_name;
#endif
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dir;
+#endif
};
static bool sff_module_supported(const struct sfp_eeprom_id *id)
@@ -1390,6 +1394,54 @@ static void sfp_module_tx_enable(struct sfp *sfp)
sfp_set_state(sfp, sfp->state);
}
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int sfp_debug_state_show(struct seq_file *s, void *data)
+{
+ struct sfp *sfp = s->private;
+
+ seq_printf(s, "Module state: %s\n",
+ mod_state_to_str(sfp->sm_mod_state));
+ seq_printf(s, "Module probe attempts: %d %d\n",
+ R_PROBE_RETRY_INIT - sfp->sm_mod_tries_init,
+ R_PROBE_RETRY_SLOW - sfp->sm_mod_tries);
+ seq_printf(s, "Device state: %s\n",
+ dev_state_to_str(sfp->sm_dev_state));
+ seq_printf(s, "Main state: %s\n",
+ sm_state_to_str(sfp->sm_state));
+ seq_printf(s, "Fault recovery remaining retries: %d\n",
+ sfp->sm_fault_retries);
+ seq_printf(s, "PHY probe remaining retries: %d\n",
+ sfp->sm_phy_retries);
+ seq_printf(s, "moddef0: %d\n", !!(sfp->state & SFP_F_PRESENT));
+ seq_printf(s, "rx_los: %d\n", !!(sfp->state & SFP_F_LOS));
+ seq_printf(s, "tx_fault: %d\n", !!(sfp->state & SFP_F_TX_FAULT));
+ seq_printf(s, "tx_disable: %d\n", !!(sfp->state & SFP_F_TX_DISABLE));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sfp_debug_state);
+
+static void sfp_debugfs_init(struct sfp *sfp)
+{
+ sfp->debugfs_dir = debugfs_create_dir(dev_name(sfp->dev), NULL);
+
+ debugfs_create_file("state", 0600, sfp->debugfs_dir, sfp,
+ &sfp_debug_state_fops);
+}
+
+static void sfp_debugfs_exit(struct sfp *sfp)
+{
+ debugfs_remove_recursive(sfp->debugfs_dir);
+}
+#else
+static void sfp_debugfs_init(struct sfp *sfp)
+{
+}
+
+static void sfp_debugfs_exit(struct sfp *sfp)
+{
+}
+#endif
+
static void sfp_module_tx_fault_reset(struct sfp *sfp)
{
unsigned int state = sfp->state;
@@ -1482,15 +1534,19 @@ static void sfp_sm_link_down(struct sfp *sfp)
static void sfp_sm_link_check_los(struct sfp *sfp)
{
- unsigned int los = sfp->state & SFP_F_LOS;
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+ bool los = false;
/* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
- * are set, we assume that no LOS signal is available.
+ * are set, we assume that no LOS signal is available. If both are
+ * set, we assume LOS is not implemented (and is meaningless).
*/
- if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
- los ^= SFP_F_LOS;
- else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
- los = 0;
+ if (los_options == los_inverted)
+ los = !(sfp->state & SFP_F_LOS);
+ else if (los_options == los_normal)
+ los = !!(sfp->state & SFP_F_LOS);
if (los)
sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
@@ -1500,18 +1556,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
{
- return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
- event == SFP_E_LOS_LOW) ||
- (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
- event == SFP_E_LOS_HIGH);
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+ return (los_options == los_inverted && event == SFP_E_LOS_LOW) ||
+ (los_options == los_normal && event == SFP_E_LOS_HIGH);
}
static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
{
- return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
- event == SFP_E_LOS_HIGH) ||
- (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
- event == SFP_E_LOS_LOW);
+ const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
+ const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
+ __be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
+
+ return (los_options == los_inverted && event == SFP_E_LOS_HIGH) ||
+ (los_options == los_normal && event == SFP_E_LOS_LOW);
}
static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
@@ -2483,6 +2543,8 @@ static int sfp_probe(struct platform_device *pdev)
if (!sfp->sfp_bus)
return -ENOMEM;
+ sfp_debugfs_init(sfp);
+
return 0;
}
@@ -2490,6 +2552,7 @@ static int sfp_remove(struct platform_device *pdev)
{
struct sfp *sfp = platform_get_drvdata(pdev);
+ sfp_debugfs_exit(sfp);
sfp_unregister_socket(sfp->sfp_bus);
rtnl_lock();
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index ee5058445d06..0fe78826c8fa 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -278,10 +278,8 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
header = (struct pptp_gre_header *)(skb->data);
/* ack in different place if S = 0 */
- ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;
-
- ack = ntohl(ack);
-
+ ack = GRE_IS_SEQ(header->gre_hd.flags) ? ntohl(header->ack) :
+ ntohl(header->seq);
if (ack > opt->ack_recv)
opt->ack_recv = ack;
/* also handle sequence number wrap-around */
@@ -355,7 +353,7 @@ static int pptp_rcv(struct sk_buff *skb)
/* if invalid, discard this packet */
goto drop;
- po = lookup_chan(htons(header->call_id), iph->saddr);
+ po = lookup_chan(ntohs(header->call_id), iph->saddr);
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 1f4bdd94407a..ff4aa35979a1 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -713,8 +713,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_probe_transport_header(skb);
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
+ if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
@@ -722,12 +721,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
+ uarg->callback(NULL, uarg, false);
}
if (tap) {
@@ -1166,8 +1163,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
}
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
+ if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 978ac0981d16..62690baa19bc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1599,12 +1599,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
struct xdp_buff xdp;
u32 act;
- xdp.data_hard_start = buf;
- xdp.data = buf + pad;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &tfile->xdp_rxq;
- xdp.frame_sz = buflen;
+ xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf, pad, len, false);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -1814,12 +1810,10 @@ drop:
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = msg_control;
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(uarg, false);
+ uarg->callback(NULL, uarg, false);
}
skb_reset_network_header(skb);
@@ -2344,9 +2338,9 @@ static int tun_xdp_one(struct tun_struct *tun,
skb_xdp = true;
goto build;
}
+
+ xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
xdp_set_data_meta_invalid(xdp);
- xdp->rxq = &tfile->xdp_rxq;
- xdp->frame_sz = buflen;
act = bpf_prog_run_xdp(xdp_prog, xdp);
err = tun_xdp_act(tun, xdp_prog, xdp, act);
@@ -2741,7 +2735,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
- /* free_netdev() won't check refcnt, to aovid race
+ /* free_netdev() won't check refcnt, to avoid race
* with dev_put() we need publish tun after registration.
*/
rcu_assign_pointer(tfile->tun, tun);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 2bb28db89432..ef6dd012b8c4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -370,7 +370,7 @@ static struct usb_driver hso_driver;
static struct tty_driver *tty_drv;
static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS];
static struct hso_device *network_table[HSO_MAX_NET_DEVICES];
-static spinlock_t serial_table_lock;
+static DEFINE_SPINLOCK(serial_table_lock);
static const s32 default_port_spec[] = {
HSO_INTF_MUX | HSO_PORT_NETWORK,
@@ -3236,7 +3236,6 @@ static int __init hso_init(void)
pr_info("%s\n", version);
/* Initialise the serial table */
- spin_lock_init(&serial_table_lock);
for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++)
serial_table[i] = NULL;
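Both this hso change and the ixp4xx_hss change further down swap a runtime spin_lock_init() call for static initialization. A minimal sketch:

#include <linux/spinlock.h>

/* Sketch: DEFINE_SPINLOCK() initializes the lock at compile time, so
 * it is valid before any init function runs and the explicit
 * spin_lock_init() call can be dropped.
 */
static DEFINE_SPINLOCK(example_lock);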
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index af19513a9f75..7ea113f51074 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -186,7 +186,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
net = qmimux_find_dev(dev, hdr->mux_id);
if (!net)
goto skip;
- skbn = netdev_alloc_skb(net, pkt_len);
+ skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
if (!skbn)
return 0;
skbn->dev = net;
@@ -203,6 +203,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
goto skip;
}
+ skb_reserve(skbn, LL_MAX_HEADER);
skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
if (netif_rx(skbn) != NET_RX_SUCCESS) {
net->stats.rx_errors++;
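The qmi_wwan hunks pair an enlarged allocation with an skb_reserve(), so each demuxed skb carries LL_MAX_HEADER bytes of headroom and later header pushes do not force a reallocation. A sketch of the combined pattern (the rx_alloc() helper is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: allocate the packet plus worst-case link-layer headroom,
 * then reserve that headroom so skb->data points at the payload while
 * skb_push() still has room to prepend headers without copying.
 */
static struct sk_buff *rx_alloc(struct net_device *net, unsigned int pkt_len)
{
	struct sk_buff *skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);

	if (skbn)
		skb_reserve(skbn, LL_MAX_HEADER);

	return skbn;
}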
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 02bfcdf50a7a..99caae7d1641 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -654,7 +654,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
- u32 pktlen, headroom, act, metalen;
+ u32 pktlen, headroom, act, metalen, frame_sz;
void *orig_data, *orig_data_end;
struct bpf_prog *xdp_prog;
int mac_len, delta, off;
@@ -710,15 +710,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
skb = nskb;
}
- xdp.data_hard_start = skb->head;
- xdp.data = skb_mac_header(skb);
- xdp.data_end = xdp.data + pktlen;
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
-
/* SKB "head" area always have tailroom for skb_shared_info */
- xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
- xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frame_sz = skb_end_pointer(skb) - skb->head;
+ frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
orig_data = xdp.data;
orig_data_end = xdp.data_end;
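The XDP conversions in tun, veth, virtio_net and xen-netfront all replace the same open-coded field assignments with xdp_init_buff()/xdp_prepare_buff(). Roughly what the pair does, reconstructed from the removed lines above (a sketch, not the canonical include/net/xdp.h definitions):

#include <net/xdp.h>

/* Sketch: xdp_init_buff() records the per-frame constants, and
 * xdp_prepare_buff() derives the data pointers for one packet. When
 * meta_valid is false, data_meta is set past data so metadata is
 * treated as absent.
 */
static void init_buff(struct xdp_buff *xdp, u32 frame_sz,
		      struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
}

static void prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
			 u32 headroom, u32 data_len, bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}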
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 508408fbe78f..ba8e63792549 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -689,12 +689,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
page = xdp_page;
}
- xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
- xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp.data_end = xdp.data + len;
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = buflen;
+ xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
+ xdp_headroom, len, true);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -859,12 +856,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* the descriptor on if we get an XDP_TX return code.
*/
data = page_address(xdp_page) + offset;
- xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
- xdp.data = data + vi->hdr_len;
- xdp.data_end = xdp.data + (len - vi->hdr_len);
- xdp.data_meta = xdp.data;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = frame_sz - vi->hdr_len;
+ xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
+ VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a8ad710629e6..3929e437382b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3283,12 +3283,13 @@ static void vxlan_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->vlan_features = dev->features;
- dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
@@ -4521,17 +4522,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
- if (event == NETDEV_UNREGISTER) {
- if (!dev->udp_tunnel_nic_info)
- vxlan_offload_rx_ports(dev, false);
+ if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
- } else if (event == NETDEV_REGISTER) {
- if (!dev->udp_tunnel_nic_info)
- vxlan_offload_rx_ports(dev, true);
- } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
- event == NETDEV_UDP_TUNNEL_DROP_INFO) {
- vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
- }
+ else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
+ vxlan_offload_rx_ports(dev, true);
+ else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
+ vxlan_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 7c5cf77e9ef1..ecea09fd21cb 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -323,7 +323,7 @@ struct desc {
static int ports_open;
static struct dma_pool *dma_pool;
-static spinlock_t npe_lock;
+static DEFINE_SPINLOCK(npe_lock);
static const struct {
int tx, txdone, rx, rxfree;
@@ -1402,8 +1402,6 @@ static int __init hss_init_module(void)
(IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
return -ENODEV;
- spin_lock_init(&npe_lock);
-
return platform_driver_register(&ixp4xx_hss_driver);
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 2fde439543fb..3092a09d3eaa 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1535,7 +1535,7 @@ sbni_setup( char *p )
goto bad_param;
for( n = 0, parm = 0; *p && n < 8; ) {
- (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
+ (*dest[ parm ])[ n ] = simple_strtoul( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
if( *p == ';' ) {
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8ee24e351bdc..4a16d6e33c09 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -399,7 +399,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+ bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
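Several hunks in this series (tap, tun, vhost/net, xen-netback) track the same core change: the ubuf_info completion callback now takes the skb as its first argument, and callers that only hold the ubuf pass NULL. A sketch of a conforming callback (my_zerocopy_done() is hypothetical):

#include <linux/skbuff.h>

/* Sketch: the new signature lets the core hand the completing skb to
 * the callback; existing users that don't need it ignore the pointer.
 */
static void my_zerocopy_done(struct sk_buff *skb, struct ubuf_info *ubuf,
			     bool zerocopy_success)
{
	/* release per-transfer state tracked in ubuf->ctx here */
}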
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index acb786d8b1d8..08b0e3d0b7eb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,7 +47,7 @@
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
-/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
* increasing the inflight counter. We need to increase the inflight
* counter because core driver calls into xenvif_zerocopy_callback
* which calls xenvif_skb_zerocopy_complete.
@@ -55,7 +55,7 @@
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb)
{
- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
atomic_inc(&queue->inflight_packets);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index bc3421d14576..19a27dce79d2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1091,7 +1091,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
- uarg->callback(uarg, true);
+ uarg->callback(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
@@ -1228,7 +1228,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+ bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 6f10e0998f1c..a5439c130130 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -411,7 +411,7 @@ static void read_xenbus_frontend_xdp(struct backend_info *be,
vif->xdp_headroom = headroom;
}
-/**
+/*
* Callback received when the frontend's state changes.
*/
static void frontend_changed(struct xenbus_device *dev,
@@ -996,7 +996,7 @@ static int netback_remove(struct xenbus_device *dev)
return 0;
}
-/**
+/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and switch to InitWait.
*/
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b01848ef4649..6ef2adbd283a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -864,12 +864,10 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
u32 act;
int err;
- xdp->data_hard_start = page_address(pdata);
- xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &queue->xdp_rxq;
- xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+ xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+ &queue->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
+ len, false);
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
@@ -1582,7 +1580,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return ERR_PTR(err);
}
-/**
+/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffers for communication with the backend, and
* inform the backend of the appropriate details for those.
@@ -1659,7 +1657,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
}
}
-/**
+/*
* We are reconnecting to the backend, due to a suspend/resume, or a backend
* driver restart. We tear down our netif structure and recreate it, but
* leave the device-layer structures intact so that this is transparent to the
@@ -2305,7 +2303,7 @@ static int xennet_connect(struct net_device *dev)
return 0;
}
-/**
+/*
* Callback received when the backend's state changes.
*/
static void netback_changed(struct xenbus_device *dev,
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index d524e92051a3..ca3d07fe7f58 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -901,19 +901,14 @@ static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
}
static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
- struct switchdev_trans *trans,
u8 state)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return dpaa2_switch_port_set_stp_state(port_priv, state);
}
static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
unsigned long flags)
{
if (flags & ~(BR_LEARNING | BR_FLOOD))
@@ -923,15 +918,11 @@ static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
}
static int dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
- struct switchdev_trans *trans,
unsigned long flags)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* Learning is enabled per switch */
err = dpaa2_switch_set_learning(port_priv->ethsw_data,
!!(flags & BR_LEARNING));
@@ -945,22 +936,21 @@ exit:
}
static int dpaa2_switch_port_attr_set(struct net_device *netdev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = dpaa2_switch_port_attr_stp_state_set(netdev, trans,
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, trans,
+ err = dpaa2_switch_port_attr_br_flags_pre_set(netdev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = dpaa2_switch_port_attr_br_flags_set(netdev, trans,
+ err = dpaa2_switch_port_attr_br_flags_set(netdev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
@@ -975,54 +965,49 @@ static int dpaa2_switch_port_attr_set(struct net_device *netdev,
}
static int dpaa2_switch_port_vlans_add(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpsw_attr *attr = &ethsw->sw_attr;
- int vid, err = 0, new_vlans = 0;
-
- if (switchdev_trans_ph_prepare(trans)) {
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!port_priv->ethsw_data->vlans[vid])
- new_vlans++;
-
- /* Make sure that the VLAN is not already configured
- * on the switch port
- */
- if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER)
- return -EEXIST;
- }
+ int err = 0;
- /* Check if there is space for a new VLAN */
- err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
- &ethsw->sw_attr);
- if (err) {
- netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
- return err;
- }
- if (attr->max_vlans - attr->num_vlans < new_vlans)
- return -ENOSPC;
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
- return 0;
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
}
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!port_priv->ethsw_data->vlans[vid]) {
- /* this is a new VLAN */
- err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vid);
- if (err)
- return err;
- port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
- }
- err = dpaa2_switch_port_add_vlan(port_priv, vid, vlan->flags);
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vlan->vid);
if (err)
- break;
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
}
- return err;
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}
static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
@@ -1043,15 +1028,11 @@ static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc
}
static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* Check if address is already set on this port */
if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
return -EEXIST;
@@ -1070,21 +1051,18 @@ static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
}
static int dpaa2_switch_port_obj_add(struct net_device *netdev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = dpaa2_switch_port_vlans_add(netdev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = dpaa2_switch_port_mdb_add(netdev,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1155,18 +1133,11 @@ static int dpaa2_switch_port_vlans_del(struct net_device *netdev,
const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int vid, err = 0;
if (netif_is_bridge_master(vlan->obj.orig_dev))
return -EOPNOTSUPP;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = dpaa2_switch_port_del_vlan(port_priv, vid);
- if (err)
- break;
- }
-
- return err;
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
@@ -1216,8 +1187,7 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -1411,8 +1381,7 @@ static int dpaa2_switch_port_obj_event(unsigned long event,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
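With trans gone, the notifier payloads that dpaa2_switch_port_attr_set_event() and dpaa2_switch_port_obj_event() unpack shrink accordingly. A trimmed sketch of their post-series shape, assuming the only change is the removal of the trans pointer:

	/* Sketch, trimmed from include/net/switchdev.h after the series. */
	struct switchdev_notifier_port_attr_info {
		struct switchdev_notifier_info info;
		const struct switchdev_attr *attr;
		bool handled;
	};

	struct switchdev_notifier_port_obj_info {
		struct switchdev_notifier_info info;
		const struct switchdev_obj *obj;
		bool handled;
	};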
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c8784dfafdd7..df82b124170e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -381,7 +381,8 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
}
}
-static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+static void vhost_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
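The extra struct sk_buff * parameter tracks the msg_zerocopy rework on the skbuff side, which now hands the completed skb to the completion callback. A sketch of a consumer of the new signature; my_zerocopy_done() is a hypothetical name:

	/* Hypothetical consumer of the three-argument completion callback. */
	static void my_zerocopy_done(struct sk_buff *skb,
				     struct ubuf_info *ubuf, bool success)
	{
		/* skb:     the buffer whose pages are no longer in flight
		 * ubuf:    ->ctx carries producer state (vhost_net_ubuf_ref here)
		 * success: false when the zerocopy send did not complete cleanly
		 */
	}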
@@ -827,14 +828,15 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
msg.msg_flags &= ~MSG_MORE;
}
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
- vhost_discard_vq_desc(vq, 1);
- vhost_net_enable_vq(net, vq);
- break;
- }
- if (err != len)
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: len %d != %zd\n",
err, len);
done:
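The hunk above replaces the unconditional stop-on-error with a retry-versus-drop split: transient resource exhaustion (-EAGAIN, -ENOMEM, -ENOBUFS) requeues the descriptor and pauses the queue, while any other sendmsg() error drops only the offending packet, so a single bad packet can no longer wedge the tx queue. The zerocopy path below applies the same split after rolling back its in-flight slot. A stand-alone sketch of the policy (illustration, not kernel code):

	#include <errno.h>
	#include <stdbool.h>

	/* Transient errors: keep the descriptor and retry later;
	 * anything else: log, drop this packet, keep draining.
	 */
	static bool tx_should_retry(int err)
	{
		return err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
	}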
@@ -902,6 +904,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
+ ubuf->flags = SKBFL_ZEROCOPY_FRAG;
refcount_set(&ubuf->refcnt, 1);
msg.msg_control = &ctl;
ctl.type = TUN_MSG_UBUF;
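SKBFL_ZEROCOPY_FRAG marks the buffer as zerocopy-enabled with fragments that may be shared rather than copied. A trimmed sketch of the skbuff-side flags introduced alongside this change; the exact bit values are an assumption here:

	/* Sketch (assumed layout of the new skbuff flags): */
	enum {
		SKBFL_ZEROCOPY_ENABLE = BIT(0),	/* zerocopy completion path on */
		SKBFL_SHARED_FRAG     = BIT(1),	/* frags may be shared, not copied */
	};
	#define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)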
@@ -922,7 +925,6 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
msg.msg_flags &= ~MSG_MORE;
}
- /* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
@@ -931,11 +933,13 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
- vhost_discard_vq_desc(vq, 1);
- vhost_net_enable_vq(net, vq);
- break;
- }
- if (err != len)
+ if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ pr_debug("Fail to send packet: err %d", err);
+ } else if (unlikely(err != len))
pr_debug("Truncated TX packet: "
" len %d != %zd\n", err, len);
if (!zcopy_used)