Diffstat (limited to 'drivers')
-rw-r--r-- drivers/atm/lanai.c | 3
-rw-r--r-- drivers/atm/nicstar.c | 24
-rw-r--r-- drivers/bus/mhi/core/main.c | 11
-rw-r--r-- drivers/crypto/caam/qi.c | 15
-rw-r--r-- drivers/infiniband/core/nldev.c | 10
-rw-r--r-- drivers/infiniband/hw/hfi1/driver.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/ipoib.h | 27
-rw-r--r-- drivers/infiniband/hw/hfi1/ipoib_main.c | 15
-rw-r--r-- drivers/infiniband/hw/hfi1/ipoib_tx.c | 2
-rw-r--r-- drivers/net/Kconfig | 9
-rw-r--r-- drivers/net/Makefile | 2
-rw-r--r-- drivers/net/bareudp.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 11
-rw-r--r-- drivers/net/can/vxcan.c | 4
-rw-r--r-- drivers/net/dsa/Kconfig | 2
-rw-r--r-- drivers/net/dsa/Makefile | 1
-rw-r--r-- drivers/net/dsa/hirschmann/Kconfig | 9
-rw-r--r-- drivers/net/dsa/hirschmann/Makefile | 5
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.c | 1339
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.h | 286
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 479
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h | 58
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_ptp.c | 452
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_ptp.h | 76
-rw-r--r-- drivers/net/dsa/mt7530.c | 51
-rw-r--r-- drivers/net/dsa/mt7530.h | 12
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 19
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/devlink.c | 105
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_vtu.c | 4
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 27
-rw-r--r-- drivers/net/dummy.c | 2
-rw-r--r-- drivers/net/ethernet/8390/mac8390.c | 7
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ne2k-pci.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 44
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 151
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 3
-rw-r--r-- drivers/net/ethernet/davicom/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 9
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 10
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 4
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 122
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 28
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 51
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 47
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 99
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 17
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 71
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 13
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/common.h | 12
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 189
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 137
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 101
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 377
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 97
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 420
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 500
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 785
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 1334
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 87
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 17
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 61
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 58
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 820
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 307
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 16
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw_qos.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c | 170
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 501
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 107
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 42
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 79
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1552
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 70
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 38
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.h | 1
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 294
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.h | 31
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_net.c | 38
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 41
-rw-r--r-- drivers/net/ethernet/neterion/s2io.h | 4
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 4
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 9
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 3
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 109
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 6
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 18
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 107
-rw-r--r-- drivers/net/ethernet/sfc/bitfield.h | 58
-rw-r--r-- drivers/net/ethernet/sfc/ef100_nic.c | 21
-rw-r--r-- drivers/net/ethernet/sfc/ef100_tx.c | 66
-rw-r--r-- drivers/net/ethernet/smsc/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 17
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 46
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 24
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 31
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 22
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 355
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 41
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.h | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw_switchdev.c | 2
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 98
-rw-r--r-- drivers/net/ethernet/xilinx/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 5
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 115
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 56
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 14
-rw-r--r-- drivers/net/fddi/skfp/drvfbi.c | 4
-rw-r--r-- drivers/net/fddi/skfp/ecm.c | 7
-rw-r--r-- drivers/net/fddi/skfp/ess.c | 1
-rw-r--r-- drivers/net/fddi/skfp/hwt.c | 4
-rw-r--r-- drivers/net/fddi/skfp/pcmplc.c | 4
-rw-r--r-- drivers/net/fddi/skfp/pmf.c | 4
-rw-r--r-- drivers/net/fddi/skfp/queue.c | 4
-rw-r--r-- drivers/net/fddi/skfp/rmt.c | 4
-rw-r--r-- drivers/net/fddi/skfp/smtdef.c | 4
-rw-r--r-- drivers/net/fddi/skfp/smtinit.c | 4
-rw-r--r-- drivers/net/fddi/skfp/smttimer.c | 4
-rw-r--r-- drivers/net/fddi/skfp/srf.c | 5
-rw-r--r-- drivers/net/geneve.c | 2
-rw-r--r-- drivers/net/gtp.c | 2
-rw-r--r-- drivers/net/hamradio/hdlcdrv.c | 2
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 22
-rw-r--r-- drivers/net/ifb.c | 3
-rw-r--r-- drivers/net/ipa/gsi.c | 387
-rw-r--r-- drivers/net/ipa/gsi.h | 51
-rw-r--r-- drivers/net/ipa/gsi_reg.h | 135
-rw-r--r-- drivers/net/ipa/ipa_cmd.c | 6
-rw-r--r-- drivers/net/ipa/ipa_cmd.h | 21
-rw-r--r-- drivers/net/ipa/ipa_data-sc7180.c | 4
-rw-r--r-- drivers/net/ipa/ipa_data-sdm845.c | 8
-rw-r--r-- drivers/net/ipa/ipa_data.h | 12
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 35
-rw-r--r-- drivers/net/ipa/ipa_endpoint.h | 2
-rw-r--r-- drivers/net/ipa/ipa_interrupt.c | 6
-rw-r--r-- drivers/net/ipa/ipa_interrupt.h | 16
-rw-r--r-- drivers/net/ipa/ipa_main.c | 156
-rw-r--r-- drivers/net/ipa/ipa_mem.c | 10
-rw-r--r-- drivers/net/ipa/ipa_qmi_msg.h | 12
-rw-r--r-- drivers/net/ipa/ipa_reg.h | 343
-rw-r--r-- drivers/net/ipa/ipa_table.c | 4
-rw-r--r-- drivers/net/ipa/ipa_uc.c | 46
-rw-r--r-- drivers/net/macsec.c | 1
-rw-r--r-- drivers/net/macvlan.c | 4
-rw-r--r-- drivers/net/mhi_net.c | 316
-rw-r--r-- drivers/net/mii.c | 20
-rw-r--r-- drivers/net/net_failover.c | 2
-rw-r--r-- drivers/net/netconsole.c | 1
-rw-r--r-- drivers/net/netdevsim/dev.c | 6
-rw-r--r-- drivers/net/netdevsim/fib.c | 265
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r-- drivers/net/phy/adin.c | 195
-rw-r--r-- drivers/net/phy/amd.c | 37
-rw-r--r-- drivers/net/phy/aquantia_main.c | 59
-rw-r--r-- drivers/net/phy/at803x.c | 50
-rw-r--r-- drivers/net/phy/bcm-cygnus.c | 2
-rw-r--r-- drivers/net/phy/bcm-phy-lib.c | 49
-rw-r--r-- drivers/net/phy/bcm-phy-lib.h | 1
-rw-r--r-- drivers/net/phy/bcm54140.c | 46
-rw-r--r-- drivers/net/phy/bcm63xx.c | 20
-rw-r--r-- drivers/net/phy/bcm87xx.c | 50
-rw-r--r-- drivers/net/phy/broadcom.c | 70
-rw-r--r-- drivers/net/phy/cicada.c | 35
-rw-r--r-- drivers/net/phy/davicom.c | 63
-rw-r--r-- drivers/net/phy/lxt.c | 94
-rw-r--r-- drivers/net/phy/marvell.c | 188
-rw-r--r-- drivers/net/phy/mdio_bus.c | 2
-rw-r--r-- drivers/net/phy/microchip.c | 24
-rw-r--r-- drivers/net/phy/microchip_t1.c | 29
-rw-r--r-- drivers/net/phy/mscc/mscc_main.c | 70
-rw-r--r-- drivers/net/phy/mscc/mscc_ptp.c | 4
-rw-r--r-- drivers/net/phy/nxp-tja11xx.c | 42
-rw-r--r-- drivers/net/phy/phy-c45.c | 2
-rw-r--r-- drivers/net/phy/phy.c | 8
-rw-r--r-- drivers/net/phy/phy_device.c | 39
-rw-r--r-- drivers/net/phy/phy_led_triggers.c | 16
-rw-r--r-- drivers/net/phy/phylink.c | 5
-rw-r--r-- drivers/net/phy/realtek.c | 180
-rw-r--r-- drivers/net/phy/smsc.c | 55
-rw-r--r-- drivers/net/phy/ste10Xp.c | 53
-rw-r--r-- drivers/net/phy/vitesse.c | 61
-rw-r--r-- drivers/net/team/team.c | 9
-rw-r--r-- drivers/net/tun.c | 121
-rw-r--r-- drivers/net/usb/Kconfig | 9
-rw-r--r-- drivers/net/usb/Makefile | 1
-rw-r--r-- drivers/net/usb/aqc111.c | 2
-rw-r--r-- drivers/net/usb/asix_devices.c | 6
-rw-r--r-- drivers/net/usb/ax88172a.c | 2
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 2
-rw-r--r-- drivers/net/usb/cdc_mbim.c | 2
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 4
-rw-r--r-- drivers/net/usb/dm9601.c | 2
-rw-r--r-- drivers/net/usb/int51x1.c | 2
-rw-r--r-- drivers/net/usb/lan78xx.c | 168
-rw-r--r-- drivers/net/usb/mcs7830.c | 2
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 41
-rw-r--r-- drivers/net/usb/r8152.c | 40
-rw-r--r-- drivers/net/usb/r8153_ecm.c | 162
-rw-r--r-- drivers/net/usb/rndis_host.c | 2
-rw-r--r-- drivers/net/usb/sierra_net.c | 2
-rw-r--r-- drivers/net/usb/smsc75xx.c | 2
-rw-r--r-- drivers/net/usb/smsc95xx.c | 2
-rw-r--r-- drivers/net/usb/sr9700.c | 2
-rw-r--r-- drivers/net/usb/sr9800.c | 2
-rw-r--r-- drivers/net/usb/usbnet.c | 23
-rw-r--r-- drivers/net/veth.c | 4
-rw-r--r-- drivers/net/vxlan.c | 29
-rw-r--r-- drivers/net/wan/Kconfig | 60
-rw-r--r-- drivers/net/wan/Makefile | 3
-rw-r--r-- drivers/net/wan/dlci.c | 541
-rw-r--r-- drivers/net/wan/hdlc_fr.c | 118
-rw-r--r-- drivers/net/wan/lmc/lmc_main.c | 9
-rw-r--r-- drivers/net/wan/sdla.c | 1655
-rw-r--r-- drivers/net/wan/x25_asy.c | 836
-rw-r--r-- drivers/net/wan/x25_asy.h | 46
-rw-r--r-- drivers/net/wimax/Kconfig | 18
-rw-r--r-- drivers/net/wimax/Makefile | 2
-rw-r--r-- drivers/net/wireguard/device.c | 2
-rw-r--r-- drivers/net/wireless/Kconfig | 13
-rw-r--r-- drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r-- drivers/net/wireless/ath/carl9170/mac.c | 4
-rw-r--r-- drivers/net/wireless/ath/carl9170/main.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/b43/main.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/main.c | 6
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/core.c | 78
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/core.h | 4
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 4
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00config.c | 1
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 6
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 3
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r-- drivers/net/xen-netfront.c | 3
-rw-r--r-- drivers/nfc/s3fwrn5/core.c | 3
-rw-r--r-- drivers/nfc/s3fwrn5/firmware.c | 2
-rw-r--r-- drivers/nfc/s3fwrn5/i2c.c | 4
-rw-r--r-- drivers/nfc/s3fwrn5/s3fwrn5.h | 11
-rw-r--r-- drivers/ptp/ptp_idt82p33.c | 274
-rw-r--r-- drivers/ptp/ptp_idt82p33.h | 3
-rw-r--r-- drivers/s390/net/qeth_core.h | 22
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 223
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 40
-rw-r--r-- drivers/s390/net/qeth_ethtool.c | 243
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 33
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 5
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 12
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_api.c | 6
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_stash.c | 6
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/wimax/Documentation/i2400m.rst | 283
-rw-r--r-- drivers/staging/wimax/Documentation/index.rst | 19
-rw-r--r-- drivers/staging/wimax/Documentation/wimax.rst | 89
-rw-r--r-- drivers/staging/wimax/Kconfig | 46
-rw-r--r-- drivers/staging/wimax/Makefile | 15
-rw-r--r-- drivers/staging/wimax/TODO | 18
-rw-r--r-- drivers/staging/wimax/debug-levels.h | 29
-rw-r--r-- drivers/staging/wimax/debugfs.c | 38
-rw-r--r-- drivers/staging/wimax/i2400m/Kconfig (renamed from drivers/net/wimax/i2400m/Kconfig) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/Makefile (renamed from drivers/net/wimax/i2400m/Makefile) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/control.c (renamed from drivers/net/wimax/i2400m/control.c) | 2
-rw-r--r-- drivers/staging/wimax/i2400m/debug-levels.h (renamed from drivers/net/wimax/i2400m/debug-levels.h) | 2
-rw-r--r-- drivers/staging/wimax/i2400m/debugfs.c (renamed from drivers/net/wimax/i2400m/debugfs.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/driver.c (renamed from drivers/net/wimax/i2400m/driver.c) | 2
-rw-r--r-- drivers/staging/wimax/i2400m/fw.c (renamed from drivers/net/wimax/i2400m/fw.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/i2400m-usb.h (renamed from drivers/net/wimax/i2400m/i2400m-usb.h) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/i2400m.h (renamed from drivers/net/wimax/i2400m/i2400m.h) | 4
-rw-r--r-- drivers/staging/wimax/i2400m/linux-wimax-i2400m.h | 572
-rw-r--r-- drivers/staging/wimax/i2400m/netdev.c (renamed from drivers/net/wimax/i2400m/netdev.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/op-rfkill.c (renamed from drivers/net/wimax/i2400m/op-rfkill.c) | 2
-rw-r--r-- drivers/staging/wimax/i2400m/rx.c (renamed from drivers/net/wimax/i2400m/rx.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/sysfs.c (renamed from drivers/net/wimax/i2400m/sysfs.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/tx.c (renamed from drivers/net/wimax/i2400m/tx.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/usb-debug-levels.h (renamed from drivers/net/wimax/i2400m/usb-debug-levels.h) | 2
-rw-r--r-- drivers/staging/wimax/i2400m/usb-fw.c (renamed from drivers/net/wimax/i2400m/usb-fw.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/usb-notif.c (renamed from drivers/net/wimax/i2400m/usb-notif.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/usb-rx.c (renamed from drivers/net/wimax/i2400m/usb-rx.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/usb-tx.c (renamed from drivers/net/wimax/i2400m/usb-tx.c) | 0
-rw-r--r-- drivers/staging/wimax/i2400m/usb.c (renamed from drivers/net/wimax/i2400m/usb.c) | 2
-rw-r--r-- drivers/staging/wimax/id-table.c | 130
-rw-r--r-- drivers/staging/wimax/linux-wimax-debug.h | 491
-rw-r--r-- drivers/staging/wimax/linux-wimax.h | 239
-rw-r--r-- drivers/staging/wimax/net-wimax.h | 503
-rw-r--r-- drivers/staging/wimax/op-msg.c | 391
-rw-r--r-- drivers/staging/wimax/op-reset.c | 108
-rw-r--r-- drivers/staging/wimax/op-rfkill.c | 431
-rw-r--r-- drivers/staging/wimax/op-state-get.c | 52
-rw-r--r-- drivers/staging/wimax/stack.c | 616
-rw-r--r-- drivers/staging/wimax/wimax-internal.h | 85
350 files changed, 19056 insertions, 7385 deletions
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ac811cfa6843..d7277c26e423 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -765,8 +765,7 @@ static void lanai_shutdown_tx_vci(struct lanai_dev *lanai,
struct sk_buff *skb;
unsigned long flags, timeout;
int read, write, lastread = -1;
- APRINTK(!in_interrupt(),
- "lanai_shutdown_tx_vci called w/o process context!\n");
+
if (lvcc->vbase == NULL) /* We were never bound to a VCI */
return;
/* 15.2.1 - wait for queue to drain */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 09ad73361879..5c7e4df159b9 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -130,8 +130,9 @@ static int ns_open(struct atm_vcc *vcc);
static void ns_close(struct atm_vcc *vcc);
static void fill_tst(ns_dev * card, int n, vc_map * vc);
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
+static int ns_send_bh(struct atm_vcc *vcc, struct sk_buff *skb);
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
- struct sk_buff *skb);
+ struct sk_buff *skb, bool may_sleep);
static void process_tsq(ns_dev * card);
static void drain_scq(ns_dev * card, scq_info * scq, int pos);
static void process_rsq(ns_dev * card);
@@ -160,6 +161,7 @@ static const struct atmdev_ops atm_ops = {
.close = ns_close,
.ioctl = ns_ioctl,
.send = ns_send,
+ .send_bh = ns_send_bh,
.phy_put = ns_phy_put,
.phy_get = ns_phy_get,
.proc_read = ns_proc_read,
@@ -1620,7 +1622,7 @@ static void fill_tst(ns_dev * card, int n, vc_map * vc)
card->tst_addr = new_tst;
}
-static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+static int _ns_send(struct atm_vcc *vcc, struct sk_buff *skb, bool may_sleep)
{
ns_dev *card;
vc_map *vc;
@@ -1704,7 +1706,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
scq = card->scq0;
}
- if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+ if (push_scqe(card, vc, scq, &scqe, skb, may_sleep) != 0) {
atomic_inc(&vcc->stats->tx_err);
dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len,
DMA_TO_DEVICE);
@@ -1716,8 +1718,18 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
return 0;
}
+static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ return _ns_send(vcc, skb, true);
+}
+
+static int ns_send_bh(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ return _ns_send(vcc, skb, false);
+}
+
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool may_sleep)
{
unsigned long flags;
ns_scqe tsr;
@@ -1728,7 +1740,7 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
spin_lock_irqsave(&scq->lock, flags);
while (scq->tail == scq->next) {
- if (in_interrupt()) {
+ if (!may_sleep) {
spin_unlock_irqrestore(&scq->lock, flags);
printk("nicstar%d: Error pushing TBD.\n", card->index);
return 1;
@@ -1773,7 +1785,7 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
int has_run = 0;
while (scq->tail == scq->next) {
- if (in_interrupt()) {
+ if (!may_sleep) {
data = scq_virt_to_bus(scq, scq->next);
ns_write_sram(card, scq->scd, &data, 1);
spin_unlock_irqrestore(&scq->lock, flags);
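The lanai and nicstar changes above belong to the tree-wide effort to stop drivers from guessing their execution context via in_interrupt(): the caller now states the context explicitly. nicstar keeps one worker and adds two thin entry points for the ATM core's .send and new .send_bh hooks. A minimal sketch of the pattern; queue_has_room(), wait_for_room() and enqueue() are hypothetical stand-ins for the driver's SCQ handling, not real functions:

static int do_send(struct atm_vcc *vcc, struct sk_buff *skb, bool may_sleep)
{
	while (!queue_has_room()) {		/* hypothetical */
		if (!may_sleep)
			return 1;		/* atomic caller: fail fast */
		wait_for_room();		/* process context: may block */
	}
	return enqueue(vcc, skb);		/* hypothetical */
}

static int send(struct atm_vcc *vcc, struct sk_buff *skb)	/* .send */
{
	return do_send(vcc, skb, true);
}

static int send_bh(struct atm_vcc *vcc, struct sk_buff *skb)	/* .send_bh */
{
	return do_send(vcc, skb, false);
}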
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 2cff5ddff225..ba9e721d61b7 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1165,6 +1165,17 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return mhi_is_ring_full(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_is_full);
+
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd)
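mhi_queue_is_full() exposes TX ring occupancy to MHI client drivers, which a network client needs for flow control. A hypothetical consumer, loosely modelled on what an MHI-based netdev would do; foo_xmit() and foo_priv() are illustrative names, not part of this patch:

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_device *mdev = foo_priv(ndev)->mdev;	/* hypothetical */
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		kfree_skb(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* No room for another packet: stop the queue until the TX
	 * completion callback wakes it up again.
	 */
	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}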
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index ec53528d8205..8163f5df8ebf 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -545,14 +545,10 @@ static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
}
}
-static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
+ bool sched_napi)
{
- /*
- * In case of threaded ISR, for RT kernels in_irq() does not return
- * appropriate value, so use in_serving_softirq to distinguish between
- * softirq and irq contexts.
- */
- if (unlikely(in_irq() || !in_serving_softirq())) {
+ if (sched_napi) {
/* Disable QMan IRQ source and invoke NAPI */
qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
np->p = p;
@@ -564,7 +560,8 @@ static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct qman_fq *rsp_fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct caam_drv_req *drv_req;
@@ -573,7 +570,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
- if (caam_qi_napi_schedule(p, caam_napi))
+ if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
return qman_cb_dqrr_stop;
fd = &dqrr->fd;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 12d29d54a081..08366e254b1d 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -932,7 +932,7 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
char name[IB_DEVICE_NAME_MAX] = {};
- nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
IB_DEVICE_NAME_MAX);
if (strlen(name) == 0) {
err = -EINVAL;
@@ -1529,13 +1529,13 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
- nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
sizeof(ibdev_name));
if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
return -EINVAL;
- nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
- nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
+ nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
+ nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
sizeof(ndev_name));
ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
@@ -1602,7 +1602,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
return -EINVAL;
- nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
+ nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
sizeof(client_name));
if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
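nla_strscpy() is the strscpy()-style replacement for nla_strlcpy(): both NUL-terminate the destination, but nla_strscpy() returns the number of characters copied, or -E2BIG when the attribute had to be truncated, instead of the source length. The converted callers above keep their existing validation; a short sketch of how new code can check the return value directly:

	char name[IB_DEVICE_NAME_MAX] = {};
	ssize_t len;

	len = nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(name));
	if (len == -E2BIG)	/* attribute did not fit into the buffer */
		return -EINVAL;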
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index a40701a6e1b6..0b64aa87ab73 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1686,7 +1686,6 @@ static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
u32 extra_bytes;
u32 tlen, qpnum;
bool do_work, do_cnp;
- struct hfi1_ipoib_dev_priv *priv;
trace_hfi1_rcvhdr(packet);
@@ -1734,8 +1733,7 @@ static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
if (unlikely(!skb))
goto drop;
- priv = hfi1_ipoib_priv(netdev);
- hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
+ dev_sw_netstats_rx_add(netdev, skb->len);
skb->dev = netdev;
skb->pkt_type = PACKET_HOST;
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index b8c9d0a003fb..f650cac9d424 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -110,7 +110,6 @@ struct hfi1_ipoib_dev_priv {
const struct net_device_ops *netdev_ops;
struct rvt_qp *qp;
- struct pcpu_sw_netstats __percpu *netstats;
};
/* hfi1 ipoib rdma netdev's private data structure */
@@ -126,32 +125,6 @@ hfi1_ipoib_priv(const struct net_device *dev)
return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}
-static inline void
-hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
- u64 packets,
- u64 bytes)
-{
- struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
-
- u64_stats_update_begin(&netstats->syncp);
- netstats->rx_packets += packets;
- netstats->rx_bytes += bytes;
- u64_stats_update_end(&netstats->syncp);
-}
-
-static inline void
-hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
- u64 packets,
- u64 bytes)
-{
- struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
-
- u64_stats_update_begin(&netstats->syncp);
- netstats->tx_packets += packets;
- netstats->tx_bytes += bytes;
- u64_stats_update_end(&netstats->syncp);
-}
-
int hfi1_ipoib_send_dma(struct net_device *dev,
struct sk_buff *skb,
struct ib_ah *address,
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 9f71b9d706bd..3242290eb6a7 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -21,7 +21,7 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
int ret;
- priv->netstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
@@ -93,21 +93,12 @@ static int hfi1_ipoib_dev_stop(struct net_device *dev)
return priv->netdev_ops->ndo_stop(dev);
}
-static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
-{
- struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
-
- netdev_stats_to_stats64(storage, &dev->stats);
- dev_fetch_sw_netstats(storage, priv->netstats);
-}
-
static const struct net_device_ops hfi1_ipoib_netdev_ops = {
.ndo_init = hfi1_ipoib_dev_init,
.ndo_uninit = hfi1_ipoib_dev_uninit,
.ndo_open = hfi1_ipoib_dev_open,
.ndo_stop = hfi1_ipoib_dev_stop,
- .ndo_get_stats64 = hfi1_ipoib_dev_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static int hfi1_ipoib_send(struct net_device *dev,
@@ -182,7 +173,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_txreq_deinit(priv);
hfi1_ipoib_rxq_deinit(priv->netdev);
- free_percpu(priv->netstats);
+ free_percpu(dev->tstats);
}
static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 9df292b51a05..edd4eeac8dd1 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -121,7 +121,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
struct hfi1_ipoib_dev_priv *priv = tx->priv;
if (likely(!tx->sdma_status)) {
- hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len);
+ dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
} else {
++priv->netdev->stats.tx_errors;
dd_dev_warn(priv->dd,
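The hfi1 ipoib hunks above (and further conversions below, bareudp for instance) move drivers onto the generic per-cpu tstats helpers: allocate dev->tstats once, update it from the datapath with dev_sw_netstats_rx_add()/dev_sw_netstats_tx_add(), and let dev_get_tstats64() do the per-cpu summation in ndo_get_stats64, instead of open-coding the pcpu_sw_netstats bookkeeping. The shape of the pattern, sketched for a hypothetical driver:

static int foo_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* One packet of skb->len bytes; the TX side uses
	 * dev_sw_netstats_tx_add(dev, packets, bytes).
	 */
	dev_sw_netstats_rx_add(dev, skb->len);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_init		= foo_init,
	.ndo_get_stats64	= dev_get_tstats64,	/* sums dev->tstats */
};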
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c3dbe64e628e..4ee41924cdf1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -426,6 +426,13 @@ config VSOCKMON
mostly intended for developers or support to debug vsock issues. If
unsure, say N.
+config MHI_NET
+ tristate "MHI network driver"
+ depends on MHI_BUS
+ help
+ This is the network driver for MHI bus. It can be used with
+ QCOM based WWAN modems (like SDX55). Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
@@ -489,8 +496,6 @@ source "drivers/net/usb/Kconfig"
source "drivers/net/wireless/Kconfig"
-source "drivers/net/wimax/Kconfig"
-
source "drivers/net/wan/Kconfig"
source "drivers/net/ieee802154/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 72e18d505d1a..36e2e41ed2aa 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
+obj-$(CONFIG_MHI_NET) += mhi_net.o
#
# Networking Drivers
@@ -66,7 +67,6 @@ obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
-obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_VMXNET3) += vmxnet3/
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index ff0bea1554f9..28257bccec41 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -510,7 +510,7 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 84ecbc6fa0ff..71c9677d135f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1228,14 +1228,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
}
#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_ALL_TSO)
+ NETIF_F_GSO_SOFTWARE)
static void bond_compute_features(struct bonding *bond)
@@ -1291,8 +1291,7 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_GSO_UDP_L4;
+ NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -4721,7 +4720,7 @@ void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
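Context for the feature-flag swap above: NETIF_F_GSO_SOFTWARE is a superset that by this point already covers the TSO bits plus SCTP and UDP L4 segmentation, which is why the explicit NETIF_F_GSO_UDP_L4 additions become redundant and are dropped. Roughly, and only for illustration (netdev_features.h is authoritative and the exact set varies by kernel version):

/* Illustrative only; check netdev_features.h for the real definition. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
				 NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)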
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index d6ba9426be4d..fa47bab510bb 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -186,7 +186,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
}
if (ifmp && tbp[IFLA_IFNAME]) {
- nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
@@ -223,7 +223,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
/* register first device */
if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
else
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2451f61a38e4..f6a0488589fc 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -24,6 +24,8 @@ config NET_DSA_LOOP
This enables support for a fake mock-up switch chip which
exercises the DSA APIs.
+source "drivers/net/dsa/hirschmann/Kconfig"
+
config NET_DSA_LANTIQ_GSWIP
tristate "Lantiq / Intel GSWIP"
depends on HAS_IOMEM && NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 4a943ccc2ca4..a84adb140a04 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
+obj-y += hirschmann/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
new file mode 100644
index 000000000000..222dd35e2c9d
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+config NET_DSA_HIRSCHMANN_HELLCREEK
+ tristate "Hirschmann Hellcreek TSN Switch support"
+ depends on HAS_IOMEM
+ depends on NET_DSA
+ depends on PTP_1588_CLOCK
+ select NET_DSA_TAG_HELLCREEK
+ help
+ This driver adds support for Hirschmann Hellcreek TSN switches.
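A .config fragment that satisfies the stated dependencies (illustrative; HAS_IOMEM comes from the architecture, and the NET_DSA_TAG_HELLCREEK tagger is pulled in automatically by the select):

CONFIG_NET_DSA=y
CONFIG_PTP_1588_CLOCK=y
CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m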
diff --git a/drivers/net/dsa/hirschmann/Makefile b/drivers/net/dsa/hirschmann/Makefile
new file mode 100644
index 000000000000..f4075c2998b5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK) += hellcreek_sw.o
+hellcreek_sw-objs := hellcreek.o
+hellcreek_sw-objs += hellcreek_ptp.o
+hellcreek_sw-objs += hellcreek_hwtstamp.o
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
new file mode 100644
index 000000000000..d42f40c76ba5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: (GPL-2.0 or MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <net/dsa.h>
+
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+static const struct hellcreek_counter hellcreek_counter[] = {
+ { 0x00, "RxFiltered", },
+ { 0x01, "RxOctets1k", },
+ { 0x02, "RxVTAG", },
+ { 0x03, "RxL2BAD", },
+ { 0x04, "RxOverloadDrop", },
+ { 0x05, "RxUC", },
+ { 0x06, "RxMC", },
+ { 0x07, "RxBC", },
+ { 0x08, "RxRS<64", },
+ { 0x09, "RxRS64", },
+ { 0x0a, "RxRS65_127", },
+ { 0x0b, "RxRS128_255", },
+ { 0x0c, "RxRS256_511", },
+ { 0x0d, "RxRS512_1023", },
+ { 0x0e, "RxRS1024_1518", },
+ { 0x0f, "RxRS>1518", },
+ { 0x10, "TxTailDropQueue0", },
+ { 0x11, "TxTailDropQueue1", },
+ { 0x12, "TxTailDropQueue2", },
+ { 0x13, "TxTailDropQueue3", },
+ { 0x14, "TxTailDropQueue4", },
+ { 0x15, "TxTailDropQueue5", },
+ { 0x16, "TxTailDropQueue6", },
+ { 0x17, "TxTailDropQueue7", },
+ { 0x18, "RxTrafficClass0", },
+ { 0x19, "RxTrafficClass1", },
+ { 0x1a, "RxTrafficClass2", },
+ { 0x1b, "RxTrafficClass3", },
+ { 0x1c, "RxTrafficClass4", },
+ { 0x1d, "RxTrafficClass5", },
+ { 0x1e, "RxTrafficClass6", },
+ { 0x1f, "RxTrafficClass7", },
+ { 0x21, "TxOctets1k", },
+ { 0x22, "TxVTAG", },
+ { 0x23, "TxL2BAD", },
+ { 0x25, "TxUC", },
+ { 0x26, "TxMC", },
+ { 0x27, "TxBC", },
+ { 0x28, "TxTS<64", },
+ { 0x29, "TxTS64", },
+ { 0x2a, "TxTS65_127", },
+ { 0x2b, "TxTS128_255", },
+ { 0x2c, "TxTS256_511", },
+ { 0x2d, "TxTS512_1023", },
+ { 0x2e, "TxTS1024_1518", },
+ { 0x2f, "TxTS>1518", },
+ { 0x30, "TxTrafficClassOverrun0", },
+ { 0x31, "TxTrafficClassOverrun1", },
+ { 0x32, "TxTrafficClassOverrun2", },
+ { 0x33, "TxTrafficClassOverrun3", },
+ { 0x34, "TxTrafficClassOverrun4", },
+ { 0x35, "TxTrafficClassOverrun5", },
+ { 0x36, "TxTrafficClassOverrun6", },
+ { 0x37, "TxTrafficClassOverrun7", },
+ { 0x38, "TxTrafficClass0", },
+ { 0x39, "TxTrafficClass1", },
+ { 0x3a, "TxTrafficClass2", },
+ { 0x3b, "TxTrafficClass3", },
+ { 0x3c, "TxTrafficClass4", },
+ { 0x3d, "TxTrafficClass5", },
+ { 0x3e, "TxTrafficClass6", },
+ { 0x3f, "TxTrafficClass7", },
+};
+
+static u16 hellcreek_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->base + offset);
+}
+
+static u16 hellcreek_read_ctrl(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_CTRL_C);
+}
+
+static u16 hellcreek_read_stat(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_SWSTAT);
+}
+
+static void hellcreek_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->base + offset);
+}
+
+static void hellcreek_select_port(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
+{
+ u16 val = prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
+{
+ u16 val = counter << HR_CSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_CSEL);
+
+ /* Data sheet states to wait at least 20 internal clock cycles */
+ ndelay(200);
+}
+
+static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
+ bool pvid)
+{
+ u16 val = 0;
+
+ /* Set pvid bit first */
+ if (pvid)
+ val |= HR_VIDCFG_PVID;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+
+ /* Set vlan */
+ val |= vid << HR_VIDCFG_VID_SHIFT;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+}
+
+static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ /* Wait up to 1ms, although 3 us should be enough */
+ return readx_poll_timeout(hellcreek_read_ctrl, hellcreek,
+ val, val & HR_CTRL_C_READY,
+ 3, 1000);
+}
+
+static int hellcreek_wait_until_transitioned(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_ctrl, hellcreek,
+ val, !(val & HR_CTRL_C_TRANSITION),
+ 1, 1000);
+}
+
+static int hellcreek_wait_fdb_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_stat, hellcreek,
+ val, !(val & HR_SWSTAT_BUSY),
+ 1, 1000);
+}
+
+static int hellcreek_detect(struct hellcreek *hellcreek)
+{
+ u16 id, rel_low, rel_high, date_low, date_high, tgd_ver;
+ u8 tgd_maj, tgd_min;
+ u32 rel, date;
+
+ id = hellcreek_read(hellcreek, HR_MODID_C);
+ rel_low = hellcreek_read(hellcreek, HR_REL_L_C);
+ rel_high = hellcreek_read(hellcreek, HR_REL_H_C);
+ date_low = hellcreek_read(hellcreek, HR_BLD_L_C);
+ date_high = hellcreek_read(hellcreek, HR_BLD_H_C);
+ tgd_ver = hellcreek_read(hellcreek, TR_TGDVER);
+
+ if (id != hellcreek->pdata->module_id)
+ return -ENODEV;
+
+ rel = rel_low | (rel_high << 16);
+ date = date_low | (date_high << 16);
+ tgd_maj = (tgd_ver & TR_TGDVER_REV_MAJ_MASK) >> TR_TGDVER_REV_MAJ_SHIFT;
+ tgd_min = (tgd_ver & TR_TGDVER_REV_MIN_MASK) >> TR_TGDVER_REV_MIN_SHIFT;
+
+ dev_info(hellcreek->dev, "Module ID=%02x Release=%04x Date=%04x TGD Version=%02x.%02x\n",
+ id, rel, date, tgd_maj, tgd_min);
+
+ return 0;
+}
+
+static void hellcreek_feature_detect(struct hellcreek *hellcreek)
+{
+ u16 features;
+
+ features = hellcreek_read(hellcreek, HR_FEABITS0);
+
+ /* Currently we only detect the size of the FDB table */
+ hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >>
+ HR_FEABITS0_FDBBINS_SHIFT) * 32;
+
+ dev_info(hellcreek->dev, "Feature detect: FDB entries=%zu\n",
+ hellcreek->fdb_entries);
+}
+
+static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_HELLCREEK;
+}
+
+static int hellcreek_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Enable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val |= HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static void hellcreek_port_disable(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Disable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val &= ~HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ counter->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(hellcreek_counter);
+}
+
+static void hellcreek_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ int i;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+ u8 offset = counter->offset + port * 64;
+ u16 high, low;
+ u64 value;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_counter(hellcreek, offset);
+
+ /* The registers are locked internally by selecting the
+ * counter. So low and high can be read without reading high
+ * again.
+ */
+ high = hellcreek_read(hellcreek, HR_CRDH);
+ low = hellcreek_read(hellcreek, HR_CRDL);
+ value = ((u64)high << 16) | low;
+
+ hellcreek_port->counter_values[i] += value;
+ data[i] = hellcreek_port->counter_values[i];
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
+static u16 hellcreek_private_vid(int port)
+{
+ return VLAN_N_VID - port + 1;
+}
+
+static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ int i;
+
+ dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port);
+
+ /* Restriction: Make sure that nobody uses the "private" VLANs. These
+ * VLANs are internally used by the driver to ensure port
+ * separation. Thus, they cannot be used by someone else.
+ */
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ const u16 restricted_vid = hellcreek_private_vid(i);
+ u16 vid;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ if (vid == restricted_vid)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void hellcreek_select_vlan_params(struct hellcreek *hellcreek, int port,
+ int *shift, int *mask)
+{
+ switch (port) {
+ case 0:
+ *shift = HR_VIDMBRCFG_P0MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P0MBR_MASK;
+ break;
+ case 1:
+ *shift = HR_VIDMBRCFG_P1MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P1MBR_MASK;
+ break;
+ case 2:
+ *shift = HR_VIDMBRCFG_P2MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P2MBR_MASK;
+ break;
+ case 3:
+ *shift = HR_VIDMBRCFG_P3MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P3MBR_MASK;
+ break;
+ default:
+ *shift = *mask = 0;
+ dev_err(hellcreek->dev, "Unknown port %d selected!\n", port);
+ }
+}
+
+static void hellcreek_apply_vlan(struct hellcreek *hellcreek, int port, u16 vid,
+ bool pvid, bool untagged)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Apply VLAN: port=%d vid=%u pvid=%d untagged=%d",
+ port, vid, pvid, untagged);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_select_vlan(hellcreek, vid, pvid);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ if (untagged)
+ val |= HELLCREEK_VLAN_UNTAGGED_MEMBER << shift;
+ else
+ val |= HELLCREEK_VLAN_TAGGED_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
+ u16 vid)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Unapply VLAN: port=%d vid=%u\n", port, vid);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_vlan(hellcreek, vid, 0);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ val |= HELLCREEK_VLAN_NO_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct hellcreek *hellcreek = ds->priv;
+ u16 vid;
+
+ dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n",
+ vlan->vid_begin, vlan->vid_end, port,
+ untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged);
+}
+
+static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 vid;
+
+ dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n",
+ vlan->vid_begin, vlan->vid_end, port);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ return 0;
+}
+
+static void hellcreek_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ const char *new_state;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_port = &hellcreek->ports[port];
+ val = hellcreek_port->ptcfg;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ new_state = "DISABLED";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_BLOCKING:
+ new_state = "BLOCKING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LISTENING:
+ new_state = "LISTENING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LEARNING:
+ new_state = "LEARNING";
+ val |= HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_FORWARDING:
+ new_state = "FORWARDING";
+ val &= ~HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ default:
+ new_state = "UNKNOWN";
+ }
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ dev_dbg(hellcreek->dev, "Configured STP state for port %d: %s\n",
+ port, new_state);
+}
+
+static void hellcreek_setup_ingressflt(struct hellcreek *hellcreek, int port,
+ bool enable)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ u16 ptcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ptcfg = hellcreek_port->ptcfg;
+
+ if (enable)
+ ptcfg |= HR_PTCFG_INGRESSFLT;
+ else
+ ptcfg &= ~HR_PTCFG_INGRESSFLT;
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+ hellcreek_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_vlan_awareness(struct hellcreek *hellcreek,
+ bool enable)
+{
+ u16 swcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ swcfg = hellcreek->swcfg;
+
+ if (enable)
+ swcfg &= ~HR_SWCFG_VLAN_UNAWARE;
+ else
+ swcfg |= HR_SWCFG_VLAN_UNAWARE;
+
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+/* Default setup for DSA: VLAN <X>: CPU and Port <X> egress untagged. */
+static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port,
+ bool enabled)
+{
+ const u16 vid = hellcreek_private_vid(port);
+ int upstream = dsa_upstream_port(ds, port);
+ struct hellcreek *hellcreek = ds->priv;
+
+ /* Apply vid to port as egress untagged and port vlan id */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, port, vid, true, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ /* Apply vid to cpu port as well */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, upstream, vid, false, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, upstream, vid);
+}
+
+static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d joins a bridge\n", port);
+
+ /* When joining a vlan_filtering bridge, keep the switch VLAN aware */
+ if (!ds->vlan_filtering)
+ hellcreek_setup_vlan_awareness(hellcreek, false);
+
+ /* Drop private vlans */
+ hellcreek_setup_vlan_membership(ds, port, false);
+
+ return 0;
+}
+
+static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d leaves a bridge\n", port);
+
+ /* Enable VLAN awareness */
+ hellcreek_setup_vlan_awareness(hellcreek, true);
+
+ /* Enable private vlans */
+ hellcreek_setup_vlan_membership(ds, port, true);
+}
+
+static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ u16 meta = 0;
+
+ dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
+ "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask,
+ entry->is_obt, entry->reprio_en, entry->reprio_tc);
+
+ /* Add mac address */
+ hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
+ hellcreek_write(hellcreek, entry->mac[3] | (entry->mac[2] << 8), HR_FDBWDM);
+ hellcreek_write(hellcreek, entry->mac[5] | (entry->mac[4] << 8), HR_FDBWDL);
+
+ /* Meta data */
+ meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
+ if (entry->is_obt)
+ meta |= HR_FDBWRM0_OBT;
+ if (entry->reprio_en) {
+ meta |= HR_FDBWRM0_REPRIO_EN;
+ meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
+ }
+ hellcreek_write(hellcreek, meta, HR_FDBWRM0);
+
+ /* Commit */
+ hellcreek_write(hellcreek, 0x00, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+static int __hellcreek_fdb_del(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ dev_dbg(hellcreek->dev, "Delete FDB entry: MAC=%pM!\n", entry->mac);
+
+ /* Delete by matching idx */
+ hellcreek_write(hellcreek, entry->idx | HR_FDBWRCMD_FDBDEL, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+/* Retrieve the index of an FDB entry by MAC address. Currently we search through
+ * the complete table in hardware. If that's too slow, we might have to cache
+ * the complete FDB table in software.
+ */
+static int hellcreek_fdb_get(struct hellcreek *hellcreek,
+ const unsigned char *dest,
+ struct hellcreek_fdb_entry *entry)
+{
+ size_t i;
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ /* We have to read the complete table, because the switch/driver might
+ * enter new entries anywhere.
+ */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ unsigned char addr[ETH_ALEN];
+ u16 meta, mac;
+
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ addr[5] = mac & 0xff;
+ addr[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ addr[3] = mac & 0xff;
+ addr[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ addr[1] = mac & 0xff;
+ addr[0] = (mac & 0xff00) >> 8;
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ if (memcmp(addr, dest, ETH_ALEN))
+ continue;
+
+ /* Match found */
+ entry->idx = i;
+ entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
+ HR_FDBMDRD_AGE_SHIFT;
+ entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
+ entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
+ entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
+ entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
+ HR_FDBMDRD_REPRIO_TC_SHIFT;
+ entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
+ memcpy(entry->mac, addr, sizeof(addr));
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Add FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ memcpy(entry.mac, addr, sizeof(entry.mac));
+ entry.portmask = BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ } else {
+ /* Found */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask |= BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Delete FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ dev_err(hellcreek->dev, "FDB entry for deletion not found!\n");
+ } else {
+ /* Found */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask &= ~BIT(port);
+
+ if (entry.portmask != 0x00) {
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 entries;
+ size_t i;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Set read pointer to zero: the read of HR_FDBMAX (a read-only register)
+ * should reset the internal pointer, but that doesn't work. The vendor
+ * suggested a subsequent write as a workaround. Same for HR_FDBRDH below.
+ */
+ entries = hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ dev_dbg(hellcreek->dev, "FDB dump for port %d, entries=%d!\n", port, entries);
+
+ /* Read table */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ unsigned char null_addr[ETH_ALEN] = { 0 };
+ struct hellcreek_fdb_entry entry = { 0 };
+ u16 meta, mac;
+
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ entry.mac[5] = mac & 0xff;
+ entry.mac[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ entry.mac[3] = mac & 0xff;
+ entry.mac[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ entry.mac[1] = mac & 0xff;
+ entry.mac[0] = (mac & 0xff00) >> 8;
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ /* Check validity: an all-zero MAC marks an unused entry */
+ if (!memcmp(entry.mac, null_addr, ETH_ALEN))
+ continue;
+
+ entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry.is_static = !!(meta & HR_FDBMDRD_STATIC);
+
+ /* Check port mask */
+ if (!(entry.portmask & BIT(port)))
+ continue;
+
+ cb(entry.mac, 0, entry.is_static, data);
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
+ vlan_filtering ? "Enable" : "Disable", port);
+
+ /* Configure the port to drop packets with unknown VIDs */
+ hellcreek_setup_ingressflt(hellcreek, port, vlan_filtering);
+
+ /* Enable VLAN awareness on the switch. This is safe, because
+ * ds->vlan_filtering_is_global is set: all ports share the same setting.
+ */
+ hellcreek_setup_vlan_awareness(hellcreek, vlan_filtering);
+
+ return 0;
+}
+
+static int hellcreek_enable_ip_core(struct hellcreek *hellcreek)
+{
+ int ret;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ val = hellcreek_read(hellcreek, HR_CTRL_C);
+ val |= HR_CTRL_C_ENABLE;
+ hellcreek_write(hellcreek, val, HR_CTRL_C);
+ ret = hellcreek_wait_until_transitioned(hellcreek);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static void hellcreek_setup_cpu_and_tunnel_port(struct hellcreek *hellcreek)
+{
+ struct hellcreek_port *tunnel_port = &hellcreek->ports[TUNNEL_PORT];
+ struct hellcreek_port *cpu_port = &hellcreek->ports[CPU_PORT];
+ u16 ptcfg = 0;
+
+ ptcfg |= HR_PTCFG_LEARNING_EN | HR_PTCFG_ADMIN_EN;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, CPU_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ hellcreek_select_port(hellcreek, TUNNEL_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ cpu_port->ptcfg = ptcfg;
+ tunnel_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
+{
+ int i;
+
+ /* The switch has multiple egress queues per port. The queue is selected
+ * via the PCP field in the VLAN header. The switch internally deals
+ * with traffic classes instead of PCP values and this mapping is
+ * configurable.
+ *
+ * The default mapping is (PCP - TC):
+ * 7 - 7
+ * 6 - 6
+ * 5 - 5
+ * 4 - 4
+ * 3 - 3
+ * 2 - 1
+ * 1 - 0
+ * 0 - 2
+ *
+ * This is not an identity mapping, so configure one explicitly below.
+ */
+
+ for (i = 0; i < 8; ++i) {
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_prio(hellcreek, i);
+ hellcreek_write(hellcreek,
+ i << HR_PRTCCFG_PCP_TC_MAP_SHIFT,
+ HR_PRTCCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
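+/* Install static FDB entries for the PTP multicast MACs: 01-1B-19-00-00-00 is
+ * the primary PTP multicast address, 01-80-C2-00-00-0E the link-local address
+ * used for peer delay measurement (802.1AS). Both are steered to the
+ * management ports with a raised traffic class.
+ */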
+static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
+{
+ static struct hellcreek_fdb_entry ptp = {
+ /* MAC: 01-1B-19-00-00-00 */
+ .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry p2p = {
+ /* MAC: 01-80-C2-00-00-0E */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ int ret;
+
+ mutex_lock(&hellcreek->reg_lock);
+ ret = __hellcreek_fdb_add(hellcreek, &ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &p2p);
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_setup(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 swcfg = 0;
+ int ret, i;
+
+ dev_dbg(hellcreek->dev, "Set up the switch\n");
+
+ /* Let's go */
+ ret = hellcreek_enable_ip_core(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to enable IP core!\n");
+ return ret;
+ }
+
+ /* Enable CPU/Tunnel ports */
+ hellcreek_setup_cpu_and_tunnel_port(hellcreek);
+
+ /* Switch config: Keep defaults, enable FDB aging and learning and tag
+ * each frame from/to the CPU port for DSA tagging. Also enable the
+ * length-aware shaping mode, which eliminates the need for Qbv guard
+ * bands.
+ */
+ swcfg |= HR_SWCFG_FDBAGE_EN |
+ HR_SWCFG_FDBLRN_EN |
+ HR_SWCFG_ALWAYS_OBT |
+ (HR_SWCFG_LAS_ON << HR_SWCFG_LAS_MODE_SHIFT);
+ hellcreek->swcfg = swcfg;
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ /* Initial vlan membership to reflect port separation */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_setup_vlan_membership(ds, i, true);
+ }
+
+ /* Configure PCP <-> TC mapping */
+ hellcreek_setup_tc_identity_mapping(hellcreek);
+
+ /* Allow VLAN configurations while not filtering, which is the default
+ * for new DSA drivers.
+ */
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* The VLAN awareness is a global switch setting. Therefore, mixed VLAN
+ * filtering setups are not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Intercept _all_ PTP multicast traffic */
+ ret = hellcreek_setup_fdb(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to insert static PTP FDB entries\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
+
+ /* The MAC settings are a hardware configuration option and cannot be
+ * changed at run time or by strapping. Therefore the attached PHYs
+ * should be programmed to only advertise settings which are supported
+ * by the hardware.
+ */
+ if (hellcreek->pdata->is_100_mbits)
+ phylink_set(mask, 100baseT_Full);
+ else
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int
+hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ bool used = true;
+ int ret = -EBUSY;
+ u16 vid;
+ int i;
+
+ dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port);
+
+ /* Deny VLAN devices on top of lan ports with the same VLAN ids, because
+ * it breaks the port separation due to the private VLANs. Example:
+ *
+ * lan0.100 *and* lan1.100 cannot be used in parallel. However, lan0.99
+ * and lan1.100 work.
+ */
+
+ if (!is_vlan_dev(info->upper_dev))
+ return 0;
+
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+ /* For all ports, check bitmaps */
+ mutex_lock(&hellcreek->vlan_lock);
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ if (port == i)
+ continue;
+
+ used = used && test_bit(vid, hellcreek->ports[i].vlan_dev_bitmap);
+ }
+
+ if (used)
+ goto out;
+
+ /* Update bitmap */
+ set_bit(vid, hellcreek->ports[port].vlan_dev_bitmap);
+
+ ret = 0;
+
+out:
+ mutex_unlock(&hellcreek->vlan_lock);
+
+ return ret;
+}
+
+static const struct dsa_switch_ops hellcreek_ds_ops = {
+ .get_ethtool_stats = hellcreek_get_ethtool_stats,
+ .get_sset_count = hellcreek_get_sset_count,
+ .get_strings = hellcreek_get_strings,
+ .get_tag_protocol = hellcreek_get_tag_protocol,
+ .get_ts_info = hellcreek_get_ts_info,
+ .phylink_validate = hellcreek_phylink_validate,
+ .port_bridge_join = hellcreek_port_bridge_join,
+ .port_bridge_leave = hellcreek_port_bridge_leave,
+ .port_disable = hellcreek_port_disable,
+ .port_enable = hellcreek_port_enable,
+ .port_fdb_add = hellcreek_fdb_add,
+ .port_fdb_del = hellcreek_fdb_del,
+ .port_fdb_dump = hellcreek_fdb_dump,
+ .port_hwtstamp_set = hellcreek_port_hwtstamp_set,
+ .port_hwtstamp_get = hellcreek_port_hwtstamp_get,
+ .port_prechangeupper = hellcreek_port_prechangeupper,
+ .port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_stp_state_set = hellcreek_port_stp_state_set,
+ .port_txtstamp = hellcreek_port_txtstamp,
+ .port_vlan_add = hellcreek_vlan_add,
+ .port_vlan_del = hellcreek_vlan_del,
+ .port_vlan_filtering = hellcreek_vlan_filtering,
+ .port_vlan_prepare = hellcreek_vlan_prepare,
+ .setup = hellcreek_setup,
+};
+
+static int hellcreek_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hellcreek *hellcreek;
+ struct resource *res;
+ int ret, i;
+
+ hellcreek = devm_kzalloc(dev, sizeof(*hellcreek), GFP_KERNEL);
+ if (!hellcreek)
+ return -ENOMEM;
+
+ hellcreek->vidmbrcfg = devm_kcalloc(dev, VLAN_N_VID,
+ sizeof(*hellcreek->vidmbrcfg),
+ GFP_KERNEL);
+ if (!hellcreek->vidmbrcfg)
+ return -ENOMEM;
+
+ hellcreek->pdata = of_device_get_match_data(dev);
+
+ hellcreek->ports = devm_kcalloc(dev, hellcreek->pdata->num_ports,
+ sizeof(*hellcreek->ports),
+ GFP_KERNEL);
+ if (!hellcreek->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ struct hellcreek_port *port = &hellcreek->ports[i];
+
+ port->counter_values =
+ devm_kcalloc(dev,
+ ARRAY_SIZE(hellcreek_counter),
+ sizeof(*port->counter_values),
+ GFP_KERNEL);
+ if (!port->counter_values)
+ return -ENOMEM;
+
+ port->vlan_dev_bitmap =
+ devm_kcalloc(dev,
+ BITS_TO_LONGS(VLAN_N_VID),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!port->vlan_dev_bitmap)
+ return -ENOMEM;
+
+ port->hellcreek = hellcreek;
+ port->port = i;
+ }
+
+ mutex_init(&hellcreek->reg_lock);
+ mutex_init(&hellcreek->vlan_lock);
+ mutex_init(&hellcreek->ptp_lock);
+
+ hellcreek->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsn");
+ if (!res) {
+ dev_err(dev, "No memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->base)) {
+ dev_err(dev, "No memory available!\n");
+ return PTR_ERR(hellcreek->base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp");
+ if (!res) {
+ dev_err(dev, "No PTP memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->ptp_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->ptp_base)) {
+ dev_err(dev, "No memory available!\n");
+ return PTR_ERR(hellcreek->ptp_base);
+ }
+
+ ret = hellcreek_detect(hellcreek);
+ if (ret) {
+ dev_err(dev, "No (known) chip found!\n");
+ return ret;
+ }
+
+ ret = hellcreek_wait_until_ready(hellcreek);
+ if (ret) {
+ dev_err(dev, "Switch didn't become ready!\n");
+ return ret;
+ }
+
+ hellcreek_feature_detect(hellcreek);
+
+ hellcreek->ds = devm_kzalloc(dev, sizeof(*hellcreek->ds), GFP_KERNEL);
+ if (!hellcreek->ds)
+ return -ENOMEM;
+
+ hellcreek->ds->dev = dev;
+ hellcreek->ds->priv = hellcreek;
+ hellcreek->ds->ops = &hellcreek_ds_ops;
+ hellcreek->ds->num_ports = hellcreek->pdata->num_ports;
+ hellcreek->ds->num_tx_queues = HELLCREEK_NUM_EGRESS_QUEUES;
+
+ ret = dsa_register_switch(hellcreek->ds);
+ if (ret) {
+ dev_err(dev, "Unable to register switch\n");
+ return ret;
+ }
+
+ ret = hellcreek_ptp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup PTP!\n");
+ goto err_ptp_setup;
+ }
+
+ ret = hellcreek_hwtstamp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup hardware timestamping!\n");
+ goto err_tstamp_setup;
+ }
+
+ platform_set_drvdata(pdev, hellcreek);
+
+ return 0;
+
+err_tstamp_setup:
+ hellcreek_ptp_free(hellcreek);
+err_ptp_setup:
+ dsa_unregister_switch(hellcreek->ds);
+
+ return ret;
+}
+
+static int hellcreek_remove(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ hellcreek_hwtstamp_free(hellcreek);
+ hellcreek_ptp_free(hellcreek);
+ dsa_unregister_switch(hellcreek->ds);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct hellcreek_platform_data de1soc_r1_pdata = {
+ .num_ports = 4,
+ .is_100_mbits = 1,
+ .qbv_support = 1,
+ .qbv_on_cpu_port = 1,
+ .qbu_support = 0,
+ .module_id = 0x4c30,
+};
+
+static const struct of_device_id hellcreek_of_match[] = {
+ {
+ .compatible = "hirschmann,hellcreek-de1soc-r1",
+ .data = &de1soc_r1_pdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, hellcreek_of_match);
+
+static struct platform_driver hellcreek_driver = {
+ .probe = hellcreek_probe,
+ .remove = hellcreek_remove,
+ .driver = {
+ .name = "hellcreek",
+ .of_match_table = hellcreek_of_match,
+ },
+};
+module_platform_driver(hellcreek_driver);
+
+MODULE_AUTHOR("Kurt Kanzenbach <kurt@linutronix.de>");
+MODULE_DESCRIPTION("Hirschmann Hellcreek driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
new file mode 100644
index 000000000000..e81781ebc31c
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HELLCREEK_H_
+#define _HELLCREEK_H_
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/platform_data/hirschmann-hellcreek.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <net/dsa.h>
+
+/* Ports:
+ * - 0: CPU
+ * - 1: Tunnel
+ * - 2: TSN front port 1
+ * - 3: TSN front port 2
+ * - ...
+ */
+#define CPU_PORT 0
+#define TUNNEL_PORT 1
+
+#define HELLCREEK_VLAN_NO_MEMBER 0x0
+#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
+#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
+#define HELLCREEK_NUM_EGRESS_QUEUES 8
+
+/* Register definitions */
+#define HR_MODID_C (0 * 2)
+#define HR_REL_L_C (1 * 2)
+#define HR_REL_H_C (2 * 2)
+#define HR_BLD_L_C (3 * 2)
+#define HR_BLD_H_C (4 * 2)
+#define HR_CTRL_C (5 * 2)
+#define HR_CTRL_C_READY BIT(14)
+#define HR_CTRL_C_TRANSITION BIT(13)
+#define HR_CTRL_C_ENABLE BIT(0)
+
+#define HR_PSEL (0xa6 * 2)
+#define HR_PSEL_PTWSEL_SHIFT 4
+#define HR_PSEL_PTWSEL_MASK GENMASK(5, 4)
+#define HR_PSEL_PRTCWSEL_SHIFT 0
+#define HR_PSEL_PRTCWSEL_MASK GENMASK(2, 0)
+
+#define HR_PTCFG (0xa7 * 2)
+#define HR_PTCFG_MLIMIT_EN BIT(13)
+#define HR_PTCFG_UMC_FLT BIT(10)
+#define HR_PTCFG_UUC_FLT BIT(9)
+#define HR_PTCFG_UNTRUST BIT(8)
+#define HR_PTCFG_TAG_REQUIRED BIT(7)
+#define HR_PTCFG_PPRIO_SHIFT 4
+#define HR_PTCFG_PPRIO_MASK GENMASK(6, 4)
+#define HR_PTCFG_INGRESSFLT BIT(3)
+#define HR_PTCFG_BLOCKED BIT(2)
+#define HR_PTCFG_LEARNING_EN BIT(1)
+#define HR_PTCFG_ADMIN_EN BIT(0)
+
+#define HR_PRTCCFG (0xa8 * 2)
+#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
+#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+
+#define HR_CSEL (0x8d * 2)
+#define HR_CSEL_SHIFT 0
+#define HR_CSEL_MASK GENMASK(7, 0)
+#define HR_CRDL (0x8e * 2)
+#define HR_CRDH (0x8f * 2)
+
+#define HR_SWTRC_CFG (0x90 * 2)
+#define HR_SWTRC0 (0x91 * 2)
+#define HR_SWTRC1 (0x92 * 2)
+#define HR_PFREE (0x93 * 2)
+#define HR_MFREE (0x94 * 2)
+
+#define HR_FDBAGE (0x97 * 2)
+#define HR_FDBMAX (0x98 * 2)
+#define HR_FDBRDL (0x99 * 2)
+#define HR_FDBRDM (0x9a * 2)
+#define HR_FDBRDH (0x9b * 2)
+
+#define HR_FDBMDRD (0x9c * 2)
+#define HR_FDBMDRD_PORTMASK_SHIFT 0
+#define HR_FDBMDRD_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBMDRD_AGE_SHIFT 4
+#define HR_FDBMDRD_AGE_MASK GENMASK(7, 4)
+#define HR_FDBMDRD_OBT BIT(8)
+#define HR_FDBMDRD_PASS_BLOCKED BIT(9)
+#define HR_FDBMDRD_STATIC BIT(11)
+#define HR_FDBMDRD_REPRIO_TC_SHIFT 12
+#define HR_FDBMDRD_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBMDRD_REPRIO_EN BIT(15)
+
+#define HR_FDBWDL (0x9d * 2)
+#define HR_FDBWDM (0x9e * 2)
+#define HR_FDBWDH (0x9f * 2)
+#define HR_FDBWRM0 (0xa0 * 2)
+#define HR_FDBWRM0_PORTMASK_SHIFT 0
+#define HR_FDBWRM0_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBWRM0_OBT BIT(8)
+#define HR_FDBWRM0_PASS_BLOCKED BIT(9)
+#define HR_FDBWRM0_REPRIO_TC_SHIFT 12
+#define HR_FDBWRM0_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBWRM0_REPRIO_EN BIT(15)
+#define HR_FDBWRM1 (0xa1 * 2)
+
+#define HR_FDBWRCMD (0xa2 * 2)
+#define HR_FDBWRCMD_FDBDEL BIT(9)
+
+#define HR_SWCFG (0xa3 * 2)
+#define HR_SWCFG_GM_STATEMD BIT(15)
+#define HR_SWCFG_LAS_MODE_SHIFT 12
+#define HR_SWCFG_LAS_MODE_MASK GENMASK(13, 12)
+#define HR_SWCFG_LAS_OFF (0x00)
+#define HR_SWCFG_LAS_ON (0x01)
+#define HR_SWCFG_LAS_STATIC (0x10)
+#define HR_SWCFG_CT_EN BIT(11)
+#define HR_SWCFG_VLAN_UNAWARE BIT(10)
+#define HR_SWCFG_ALWAYS_OBT BIT(9)
+#define HR_SWCFG_FDBAGE_EN BIT(5)
+#define HR_SWCFG_FDBLRN_EN BIT(4)
+
+#define HR_SWSTAT (0xa4 * 2)
+#define HR_SWSTAT_FAIL BIT(4)
+#define HR_SWSTAT_BUSY BIT(0)
+
+#define HR_SWCMD (0xa5 * 2)
+#define HW_SWCMD_FLUSH BIT(0)
+
+#define HR_VIDCFG (0xaa * 2)
+#define HR_VIDCFG_VID_SHIFT 0
+#define HR_VIDCFG_VID_MASK GENMASK(11, 0)
+#define HR_VIDCFG_PVID BIT(12)
+
+#define HR_VIDMBRCFG (0xab * 2)
+#define HR_VIDMBRCFG_P0MBR_SHIFT 0
+#define HR_VIDMBRCFG_P0MBR_MASK GENMASK(1, 0)
+#define HR_VIDMBRCFG_P1MBR_SHIFT 2
+#define HR_VIDMBRCFG_P1MBR_MASK GENMASK(3, 2)
+#define HR_VIDMBRCFG_P2MBR_SHIFT 4
+#define HR_VIDMBRCFG_P2MBR_MASK GENMASK(5, 4)
+#define HR_VIDMBRCFG_P3MBR_SHIFT 6
+#define HR_VIDMBRCFG_P3MBR_MASK GENMASK(7, 6)
+
+#define HR_FEABITS0 (0xac * 2)
+#define HR_FEABITS0_FDBBINS_SHIFT 4
+#define HR_FEABITS0_FDBBINS_MASK GENMASK(7, 4)
+#define HR_FEABITS0_PCNT_SHIFT 8
+#define HR_FEABITS0_PCNT_MASK GENMASK(11, 8)
+#define HR_FEABITS0_MCNT_SHIFT 12
+#define HR_FEABITS0_MCNT_MASK GENMASK(15, 12)
+
+#define TR_QTRACK (0xb1 * 2)
+#define TR_TGDVER (0xb3 * 2)
+#define TR_TGDVER_REV_MIN_MASK GENMASK(7, 0)
+#define TR_TGDVER_REV_MIN_SHIFT 0
+#define TR_TGDVER_REV_MAJ_MASK GENMASK(15, 8)
+#define TR_TGDVER_REV_MAJ_SHIFT 8
+#define TR_TGDSEL (0xb4 * 2)
+#define TR_TGDSEL_TDGSEL_MASK GENMASK(1, 0)
+#define TR_TGDSEL_TDGSEL_SHIFT 0
+#define TR_TGDCTRL (0xb5 * 2)
+#define TR_TGDCTRL_GATE_EN BIT(0)
+#define TR_TGDCTRL_CYC_SNAP BIT(4)
+#define TR_TGDCTRL_SNAP_EST BIT(5)
+#define TR_TGDCTRL_ADMINGATESTATES_MASK GENMASK(15, 8)
+#define TR_TGDCTRL_ADMINGATESTATES_SHIFT 8
+#define TR_TGDSTAT0 (0xb6 * 2)
+#define TR_TGDSTAT1 (0xb7 * 2)
+#define TR_ESTWRL (0xb8 * 2)
+#define TR_ESTWRH (0xb9 * 2)
+#define TR_ESTCMD (0xba * 2)
+#define TR_ESTCMD_ESTSEC_MASK GENMASK(2, 0)
+#define TR_ESTCMD_ESTSEC_SHIFT 0
+#define TR_ESTCMD_ESTARM BIT(4)
+#define TR_ESTCMD_ESTSWCFG BIT(5)
+#define TR_EETWRL (0xbb * 2)
+#define TR_EETWRH (0xbc * 2)
+#define TR_EETCMD (0xbd * 2)
+#define TR_EETCMD_EETSEC_MASK GENMASK(2, 0)
+#define TR_EETCMD_EETSEC_SHIFT 0
+#define TR_EETCMD_EETARM BIT(4)
+#define TR_CTWRL (0xbe * 2)
+#define TR_CTWRH (0xbf * 2)
+#define TR_LCNSL (0xc1 * 2)
+#define TR_LCNSH (0xc2 * 2)
+#define TR_LCS (0xc3 * 2)
+#define TR_GCLDAT (0xc4 * 2)
+#define TR_GCLDAT_GCLWRGATES_MASK GENMASK(7, 0)
+#define TR_GCLDAT_GCLWRGATES_SHIFT 0
+#define TR_GCLDAT_GCLWRLAST BIT(8)
+#define TR_GCLDAT_GCLOVRI BIT(9)
+#define TR_GCLTIL (0xc5 * 2)
+#define TR_GCLTIH (0xc6 * 2)
+#define TR_GCLCMD (0xc7 * 2)
+#define TR_GCLCMD_GCLWRADR_MASK GENMASK(7, 0)
+#define TR_GCLCMD_GCLWRADR_SHIFT 0
+#define TR_GCLCMD_INIT_GATE_STATES_MASK GENMASK(15, 8)
+#define TR_GCLCMD_INIT_GATE_STATES_SHIFT 8
+
+struct hellcreek_counter {
+ u8 offset;
+ const char *name;
+};
+
+struct hellcreek;
+
+/* State flags for hellcreek_port_hwtstamp::state */
+enum {
+ HELLCREEK_HWTSTAMP_ENABLED,
+ HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+};
+
+/* A structure to hold hardware timestamping information per port */
+struct hellcreek_port_hwtstamp {
+ /* Timestamping state */
+ unsigned long state;
+
+ /* Resources for receive timestamping */
+ struct sk_buff_head rx_queue; /* For synchronization messages */
+
+ /* Resources for transmit timestamping */
+ unsigned long tx_tstamp_start;
+ struct sk_buff *tx_skb;
+
+ /* Current timestamp configuration */
+ struct hwtstamp_config tstamp_config;
+};
+
+struct hellcreek_port {
+ struct hellcreek *hellcreek;
+ unsigned long *vlan_dev_bitmap;
+ int port;
+ u16 ptcfg; /* ptcfg shadow */
+ u64 *counter_values;
+
+ /* Per-port timestamping resources */
+ struct hellcreek_port_hwtstamp port_hwtstamp;
+};
+
+struct hellcreek_fdb_entry {
+ size_t idx;
+ unsigned char mac[ETH_ALEN];
+ u8 portmask;
+ u8 age;
+ u8 is_obt;
+ u8 pass_blocked;
+ u8 is_static;
+ u8 reprio_tc;
+ u8 reprio_en;
+};
+
+struct hellcreek {
+ const struct hellcreek_platform_data *pdata;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct hellcreek_port *ports;
+ struct delayed_work overflow_work;
+ struct led_classdev led_is_gm;
+ struct led_classdev led_sync_good;
+ struct mutex reg_lock; /* Switch IP register lock */
+ struct mutex vlan_lock; /* VLAN bitmaps lock */
+ struct mutex ptp_lock; /* PTP IP register lock */
+ void __iomem *base;
+ void __iomem *ptp_base;
+ u16 swcfg; /* swcfg shadow */
+ u8 *vidmbrcfg; /* vidmbrcfg shadow */
+ u64 seconds; /* PTP seconds */
+ u64 last_ts; /* Used for overflow detection */
+ u16 status_out; /* ptp.status_out shadow */
+ size_t fdb_entries;
+};
+
+#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
new file mode 100644
index 000000000000..69dd9a2e8bb6
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_classify.h>
+
+#include "hellcreek.h"
+#include "hellcreek_hwtstamp.h"
+#include "hellcreek_ptp.h"
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ info->phc_index = hellcreek->ptp_clock ?
+ ptp_clock_index(hellcreek->ptp_clock) : -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ /* Enabled TX timestamping: HWTSTAMP_TX_ON is the only supported mode */
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+
+ /* L2 & L4 PTPv2 event rx messages are timestamped */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+/* Enabling/disabling TX and RX HW timestamping for different PTP messages is
+ * not available in the switch. Thus, this function only checks whether the
+ * requested configuration matches what the hardware actually offers.
+ */
+static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
+ struct hwtstamp_config *config)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ bool tx_tstamp_enable = false;
+ bool rx_tstamp_enable = false;
+
+ /* Interaction with the timestamp hardware is prevented here. It is
+ * enabled when this config function ends successfully
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ /* Reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ tx_tstamp_enable = true;
+ break;
+
+ /* TX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_TX_OFF:
+ config->tx_type = HWTSTAMP_TX_ON;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ /* RX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_FILTER_NONE:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_tstamp_enable = true;
+ break;
+
+ /* RX HW timestamping can't be enabled for all messages on the switch */
+ case HWTSTAMP_FILTER_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ if (!tx_tstamp_enable)
+ return -ERANGE;
+
+ if (!rx_tstamp_enable)
+ return -ERANGE;
+
+ /* If this point is reached, then the requested hwtstamp config is
+ * compatible with the hwtstamp offered by the switch. Therefore,
+ * enable the interaction with the HW timestamping
+ */
+ set_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ return 0;
+}
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config config;
+ int err;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ if (err)
+ return err;
+
+ /* Save the chosen configuration to be returned later */
+ memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config *config;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+ config = &ps->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
+ * if the caller should not.
+ */
+static struct ptp_header *hellcreek_should_tstamp(struct hellcreek *hellcreek,
+ int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return NULL;
+
+ if (!test_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state))
+ return NULL;
+
+ return hdr;
+}
+
+static u64 hellcreek_get_reserved_field(const struct ptp_header *hdr)
+{
+ return be32_to_cpu(hdr->reserved2);
+}
+
+static void hellcreek_clear_reserved_field(struct ptp_header *hdr)
+{
+ hdr->reserved2 = 0;
+}
+
+static int hellcreek_ptp_hwtstamp_available(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 status;
+
+ status = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ if (status & PR_TS_STATUS_TS_LOST)
+ dev_err(hellcreek->dev,
+ "Tx time stamp lost! This should never happen!\n");
+
+ /* If hwtstamp is not available, this means the previous hwtstamp was
+ * successfully read, and the one we need is not yet available
+ */
+ return (status & PR_TS_STATUS_TS_AVAIL) ? 1 : 0;
+}
+
+/* Get nanoseconds timestamp from timestamping unit */
+static u64 hellcreek_ptp_hwtstamp_read(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 nsl, nsh;
+
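+ /* The timestamp is delivered as a sequence of 16-bit words. As with the
+ * PTP clock read (see hellcreek_ptp.c), all words have to be read and
+ * only the last two hold the nanoseconds high and low parts.
+ */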
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsl = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
+static int hellcreek_txtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps, int port)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ unsigned int status_reg, data_reg;
+ struct sk_buff *tmp_skb;
+ int ts_status;
+ u64 ns = 0;
+
+ if (!ps->tx_skb)
+ return 0;
+
+ switch (port) {
+ case 2:
+ status_reg = PR_TS_TX_P1_STATUS_C;
+ data_reg = PR_TS_TX_P1_DATA_C;
+ break;
+ case 3:
+ status_reg = PR_TS_TX_P2_STATUS_C;
+ data_reg = PR_TS_TX_P2_DATA_C;
+ break;
+ default:
+ dev_err(hellcreek->dev, "Wrong port for timestamping!\n");
+ return 0;
+ }
+
+ ts_status = hellcreek_ptp_hwtstamp_available(hellcreek, status_reg);
+
+ /* Not available yet? */
+ if (ts_status == 0) {
+ /* Check whether the operation of reading the tx timestamp has
+ * exceeded its allowed period
+ */
+ if (time_is_before_jiffies(ps->tx_tstamp_start +
+ TX_TSTAMP_TIMEOUT)) {
+ dev_err(hellcreek->dev,
+ "Timeout while waiting for Tx timestamp!\n");
+ goto free_and_clear_skb;
+ }
+
+ /* The timestamp should become available quickly. Restart the worker
+ * to poll for it with high priority.
+ */
+ return 1;
+ }
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = hellcreek_ptp_hwtstamp_read(hellcreek, data_reg);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Now we have the timestamp in nanoseconds, store it in the correct
+ * structure in order to send it to the user
+ */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+
+ tmp_skb = ps->tx_skb;
+ ps->tx_skb = NULL;
+
+ /* skb_complete_tx_timestamp() frees up the client to make another
+ * timestampable transmit. We have to be ready for it by clearing the
+ * ps->tx_skb "flag" beforehand
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ /* Deliver a clone of the original outgoing tx_skb with tx hwtstamp */
+ skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
+
+ return 0;
+
+free_and_clear_skb:
+ dev_kfree_skb_any(ps->tx_skb);
+ ps->tx_skb = NULL;
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ return 0;
+}
+
+static void hellcreek_get_rxts(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ struct sk_buff *skb, struct sk_buff_head *rxq,
+ int port)
+{
+ struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ unsigned long flags;
+
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+
+ /* Lock & disable interrupts */
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ /* Splice the reception queue "rxq" onto the local "received" queue and
+ * reinitialize "rxq". From now on, we deal with "received", not with
+ * "rxq".
+ */
+ skb_queue_splice_tail_init(rxq, &received);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ for (; skb; skb = __skb_dequeue(&received)) {
+ struct ptp_header *hdr;
+ unsigned int type;
+ u64 ns;
+
+ /* Get nanoseconds from ptp packet */
+ type = SKB_PTP_TYPE(skb);
+ hdr = ptp_parse_header(skb, type);
+ ns = hellcreek_get_reserved_field(hdr);
+ hellcreek_clear_reserved_field(hdr);
+
+ /* Add seconds part */
+ mutex_lock(&hellcreek->ptp_lock);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Save time stamp */
+ shwt = skb_hwtstamps(skb);
+ memset(shwt, 0, sizeof(*shwt));
+ shwt->hwtstamp = ns_to_ktime(ns);
+ netif_rx_ni(skb);
+ }
+}
+
+static void hellcreek_rxtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ int port)
+{
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ps->rx_queue);
+ if (skb)
+ hellcreek_get_rxts(hellcreek, ps, skb, &ps->rx_queue, port);
+}
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ struct dsa_switch *ds = hellcreek->ds;
+ int i, restart = 0;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct hellcreek_port_hwtstamp *ps;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ ps = &hellcreek->ports[i].port_hwtstamp;
+
+ if (test_bit(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
+ restart |= hellcreek_txtstamp_work(hellcreek, ps, i);
+
+ hellcreek_rxtstamp_work(hellcreek, ps, i);
+ }
+
+ return restart ? 1 : -1;
+}
+
+bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* Check if the driver is expected to do HW timestamping */
+ if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, clone, type);
+ if (!hdr)
+ return false;
+
+ if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+ &ps->state))
+ return false;
+
+ ps->tx_skb = clone;
+
+ /* Record the start time (in jiffies) so the deferred work can detect a
+ * timestamp read timeout.
+ */
+ ps->tx_tstamp_start = jiffies;
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* This check only fails if the user did not initialize hardware
+ * timestamping beforehand.
+ */
+ if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
+ if (!hdr)
+ return false;
+
+ SKB_PTP_TYPE(skb) = type;
+
+ skb_queue_tail(&ps->rx_queue, skb);
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+
+ skb_queue_head_init(&ps->rx_queue);
+}
+
+int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek)
+{
+ struct dsa_switch *ds = hellcreek->ds;
+ int i;
+
+ /* Initialize timestamping ports. */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_hwtstamp_port_setup(hellcreek, i);
+ }
+
+ /* Select the synchronized clock as the source timekeeper for the
+ * timestamps and enable inline timestamping.
+ */
+ hellcreek_ptp_write(hellcreek, PR_SETTINGS_C_TS_SRC_TK_MASK |
+ PR_SETTINGS_C_RES3TS,
+ PR_SETTINGS_C);
+
+ return 0;
+}
+
+void hellcreek_hwtstamp_free(struct hellcreek *hellcreek)
+{
+ /* Nothing to do */
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
new file mode 100644
index 000000000000..c0745ffa1ebb
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_HWTSTAMP_H_
+#define _HELLCREEK_HWTSTAMP_H_
+
+#include <net/dsa.h>
+#include "hellcreek.h"
+
+/* Timestamp Register */
+#define PR_TS_RX_P1_STATUS_C (0x1d * 2)
+#define PR_TS_RX_P1_DATA_C (0x1e * 2)
+#define PR_TS_TX_P1_STATUS_C (0x1f * 2)
+#define PR_TS_TX_P1_DATA_C (0x20 * 2)
+#define PR_TS_RX_P2_STATUS_C (0x25 * 2)
+#define PR_TS_RX_P2_DATA_C (0x26 * 2)
+#define PR_TS_TX_P2_STATUS_C (0x27 * 2)
+#define PR_TS_TX_P2_DATA_C (0x28 * 2)
+
+#define PR_TS_STATUS_TS_AVAIL BIT(2)
+#define PR_TS_STATUS_TS_LOST BIT(3)
+
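+/* Stash the PTP message type in the skb control buffer on RX so the deferred
+ * timestamp worker can re-parse the header later.
+ */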
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1ms. Software may encounter delays, so the timeout is set
+ * accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info);
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
+
+int hellcreek_hwtstamp_setup(struct hellcreek *chip);
+void hellcreek_hwtstamp_free(struct hellcreek *chip);
+
+#endif /* _HELLCREEK_HWTSTAMP_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
new file mode 100644
index 000000000000..2572c6087bb5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->ptp_base + offset);
+}
+
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->ptp_base + offset);
+}
+
+/* Get nanoseconds from PTP clock */
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+{
+ u16 nsl, nsh;
+
+ /* Take a snapshot */
+ hellcreek_ptp_write(hellcreek, PR_COMMAND_C_SS, PR_COMMAND_C);
+
+ /* The time of the day is saved as 96 bits. However, due to hardware
+ * limitations the seconds are not or only partly kept in the PTP
+ * core. Currently only three bits for the seconds are available. That's
+ * why only the nanoseconds are used and the seconds are tracked in
+ * software. Due to the internal locking, all five registers have to be
+ * read anyway.
+ */
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
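+/* Read the clock and track second overflows in software: the hardware only
+ * provides the nanoseconds within the current second, so a wrap of the
+ * nanosecond value means another second has elapsed. This only works if the
+ * clock is read at least once per second, which the periodic overflow_work
+ * guarantees.
+ */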
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+{
+ u64 ns;
+
+ ns = hellcreek_ptp_clock_read(hellcreek);
+ if (ns < hellcreek->last_ts)
+ hellcreek->seconds++;
+ hellcreek->last_ts = ns;
+ ns += hellcreek->seconds * NSEC_PER_SEC;
+
+ return ns;
+}
+
+/* Retrieve the seconds part, in nanoseconds, for a packet timestamped with
+ * @ns. Check whether an overflow occurred between the packet arrival and now.
+ * If so, use the previous seconds value (-1) for calculating the packet
+ * arrival time.
+ */
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
+{
+ u64 s;
+
+ __hellcreek_ptp_gettime(hellcreek);
+ if (hellcreek->last_ts > ns)
+ s = hellcreek->seconds * NSEC_PER_SEC;
+ else
+ s = (hellcreek->seconds - 1) * NSEC_PER_SEC;
+
+ return s;
+}
+
+static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u64 ns;
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hellcreek_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 secl, nsh, nsl;
+
+ secl = ts->tv_sec & 0xffff;
+ nsh = ((u32)ts->tv_nsec & 0xffff0000) >> 16;
+ nsl = ts->tv_nsec & 0xffff;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Update overflow data structure */
+ hellcreek->seconds = ts->tv_sec;
+ hellcreek->last_ts = ts->tv_nsec;
+
+ /* Set time in clock */
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, secl, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsh, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsl, PR_CLOCK_WRITE_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, addendh, addendl;
+ u32 addend;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ negative = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* IP-Core adjusts the nominal frequency by adding or subtracting 1 ns
+ * from the 8 ns (period of the oscillator) every time the accumulator
+ * register overflows. The value stored in the addend register is added
+ * to the accumulator register every 8 ns.
+ *
+ * addend value = (2^30 * accumulator_overflow_rate) /
+ * oscillator_frequency
+ * where:
+ *
+ * oscillator_frequency = 125 MHz
+ * accumulator_overflow_rate = 125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8
+ */
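+ /* A worked example with hypothetical input: scaled_ppm = 65536 (+1 ppm)
+ * yields adj = 65536 << 11 = 134217728 and
+ * addend = 134217728 / 15625 = 8589.
+ */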
+ adj = scaled_ppm;
+ adj <<= 11;
+ addend = (u32)div_u64(adj, 15625);
+
+ addendh = (addend & 0xffff0000) >> 16;
+ addendl = addend & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set drift register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendh, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendl, PR_CLOCK_DRIFT_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, counth, countl;
+ u32 count_val;
+
+ /* If the offset is larger than the IP core's slow offset resources,
+ * don't use slow adjustment. Rather, add the offset directly to the
+ * current time.
+ */
+ if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
+ struct timespec64 now, then = ns_to_timespec64(delta);
+
+ hellcreek_ptp_gettime(ptp, &now);
+ now = timespec64_add(now, then);
+ hellcreek_ptp_settime(ptp, &now);
+
+ return 0;
+ }
+
+ if (delta < 0) {
+ negative = 1;
+ delta = -delta;
+ }
+
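+ /* The offset is applied in count_val steps of MAX_NS_PER_STEP ns each,
+ * e.g. a delta of 700 ns becomes 100 steps of 7 ns.
+ */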
+ /* 'count_val' does not exceed the maximum register size (2^30) */
+ count_val = div_s64(delta, MAX_NS_PER_STEP);
+
+ counth = (count_val & 0xffff0000) >> 16;
+ countl = count_val & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set offset write register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MAX_NS_PER_STEP, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MIN_CLK_CYCLES_BETWEEN_STEPS,
+ PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, countl, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, counth, PR_CLOCK_OFFSET_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void hellcreek_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek *hellcreek;
+
+ hellcreek = dw_overflow_to_hellcreek(dw);
+
+ mutex_lock(&hellcreek->ptp_lock);
+ __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+}
+
+static enum led_brightness hellcreek_get_brightness(struct hellcreek *hellcreek,
+ int led)
+{
+ return (hellcreek->status_out & led) ? 1 : 0;
+}
+
+static void hellcreek_set_brightness(struct hellcreek *hellcreek, int led,
+ enum led_brightness b)
+{
+ mutex_lock(&hellcreek->ptp_lock);
+
+ if (b)
+ hellcreek->status_out |= led;
+ else
+ hellcreek->status_out &= ~led;
+
+ hellcreek_ptp_write(hellcreek, hellcreek->status_out, STATUS_OUT);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+}
+
+static void hellcreek_led_sync_good_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, b);
+}
+
+static enum led_brightness hellcreek_led_sync_good_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
+}
+
+static void hellcreek_led_is_gm_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, b);
+}
+
+static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
+}
+
+/* There are two LEDs available, internally called sync_good and is_gm.
+ * However, the user might want to use a different label and specify the
+ * default state. Take those properties from the device tree.
+ */
+static int hellcreek_led_setup(struct hellcreek *hellcreek)
+{
+ struct device_node *leds, *led = NULL;
+ const char *label, *state;
+ int ret = -EINVAL;
+
+ leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
+ if (!leds) {
+ dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
+ return ret;
+ }
+
+ hellcreek->status_out = 0;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "First LED not specified!\n");
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_sync_good.name = ret ? "sync_good" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_sync_good.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_sync_good.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_sync_good.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_SYNC_GOOD);
+ }
+
+ hellcreek->led_sync_good.max_brightness = 1;
+ hellcreek->led_sync_good.brightness_set = hellcreek_led_sync_good_set;
+ hellcreek->led_sync_good.brightness_get = hellcreek_led_sync_good_get;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "Second LED not specified!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_is_gm.name = ret ? "is_gm" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_is_gm.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_is_gm.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_is_gm.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_IS_GM);
+ }
+
+ hellcreek->led_is_gm.max_brightness = 1;
+ hellcreek->led_is_gm.brightness_set = hellcreek_led_is_gm_set;
+ hellcreek->led_is_gm.brightness_get = hellcreek_led_is_gm_get;
+
+ /* Set initial state */
+ if (hellcreek->led_sync_good.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, 1);
+ if (hellcreek->led_is_gm.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
+
+ /* Register both LEDs */
+ led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+
+ ret = 0;
+
+out:
+ of_node_put(leds);
+
+ return ret;
+}
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek)
+{
+ u16 status;
+ int ret;
+
+ /* Set up the overflow work */
+ INIT_DELAYED_WORK(&hellcreek->overflow_work,
+ hellcreek_ptp_overflow_check);
+
+ /* Setup PTP clock */
+ hellcreek->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(hellcreek->ptp_clock_info.name,
+ sizeof(hellcreek->ptp_clock_info.name),
+ "%s", dev_name(hellcreek->dev));
+
+ /* IP-Core can add up to 0.5 ns per 8 ns cycle, which means
+ * accumulator_overflow_rate shall not exceed 62.5 MHz (which adjusts
+ * the nominal frequency by 6.25%)
+ */
+ hellcreek->ptp_clock_info.max_adj = 62500000;
+ hellcreek->ptp_clock_info.n_alarm = 0;
+ hellcreek->ptp_clock_info.n_pins = 0;
+ hellcreek->ptp_clock_info.n_ext_ts = 0;
+ hellcreek->ptp_clock_info.n_per_out = 0;
+ hellcreek->ptp_clock_info.pps = 0;
+ hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
+ hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
+ hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
+ hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
+ hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
+
+ hellcreek->ptp_clock = ptp_clock_register(&hellcreek->ptp_clock_info,
+ hellcreek->dev);
+ if (IS_ERR(hellcreek->ptp_clock))
+ return PTR_ERR(hellcreek->ptp_clock);
+
+ /* Enable the offset correction process, if no offset correction is
+ * already taking place
+ */
+ status = hellcreek_ptp_read(hellcreek, PR_CLOCK_STATUS_C);
+ if (!(status & PR_CLOCK_STATUS_C_OFS_ACT))
+ hellcreek_ptp_write(hellcreek,
+ status | PR_CLOCK_STATUS_C_ENA_OFS,
+ PR_CLOCK_STATUS_C);
+
+ /* Enable the drift correction process */
+ hellcreek_ptp_write(hellcreek, status | PR_CLOCK_STATUS_C_ENA_DRIFT,
+ PR_CLOCK_STATUS_C);
+
+ /* LED setup */
+ ret = hellcreek_led_setup(hellcreek);
+ if (ret) {
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ return ret;
+ }
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+
+ return 0;
+}
+
+void hellcreek_ptp_free(struct hellcreek *hellcreek)
+{
+ led_classdev_unregister(&hellcreek->led_is_gm);
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ cancel_delayed_work_sync(&hellcreek->overflow_work);
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ hellcreek->ptp_clock = NULL;
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.h b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
new file mode 100644
index 000000000000..0b51392c7e56
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_PTP_H_
+#define _HELLCREEK_PTP_H_
+
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "hellcreek.h"
+
+/* Every jump in time is 7 ns */
+#define MAX_NS_PER_STEP 7L
+
+/* Correct offset at every clock cycle */
+#define MIN_CLK_CYCLES_BETWEEN_STEPS 0
+
+/* Maximum available slow offset resources: (2^30 - 1) steps of 7 ns each,
+ * i.e. roughly 7.5 seconds
+ */
+#define MAX_SLOW_OFFSET_ADJ \
+ ((unsigned long long)((1 << 30) - 1) * MAX_NS_PER_STEP)
+
+/* Check for a nanosecond overflow four times a second; the counter wraps
+ * once per second, so this leaves ample margin
+ */
+#define HELLCREEK_OVERFLOW_PERIOD (HZ / 4)
+
+/* PTP Register */
+#define PR_SETTINGS_C (0x09 * 2)
+#define PR_SETTINGS_C_RES3TS BIT(4)
+#define PR_SETTINGS_C_TS_SRC_TK_SHIFT 8
+#define PR_SETTINGS_C_TS_SRC_TK_MASK GENMASK(9, 8)
+#define PR_COMMAND_C (0x0a * 2)
+#define PR_COMMAND_C_SS BIT(0)
+
+#define PR_CLOCK_STATUS_C (0x0c * 2)
+#define PR_CLOCK_STATUS_C_ENA_DRIFT BIT(12)
+#define PR_CLOCK_STATUS_C_OFS_ACT BIT(13)
+#define PR_CLOCK_STATUS_C_ENA_OFS BIT(14)
+
+#define PR_CLOCK_READ_C (0x0d * 2)
+#define PR_CLOCK_WRITE_C (0x0e * 2)
+#define PR_CLOCK_OFFSET_C (0x0f * 2)
+#define PR_CLOCK_DRIFT_C (0x10 * 2)
+
+#define PR_SS_FREE_DATA_C (0x12 * 2)
+#define PR_SS_SYNT_DATA_C (0x14 * 2)
+#define PR_SS_SYNC_DATA_C (0x16 * 2)
+#define PR_SS_DRAC_DATA_C (0x18 * 2)
+
+#define STATUS_OUT (0x60 * 2)
+#define STATUS_OUT_SYNC_GOOD BIT(0)
+#define STATUS_OUT_IS_GM BIT(1)
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek);
+void hellcreek_ptp_free(struct hellcreek *hellcreek);
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset);
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset);
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns);
+
+#define ptp_to_hellcreek(ptp) \
+ container_of(ptp, struct hellcreek, ptp_clock_info)
+
+#define dw_overflow_to_hellcreek(dw) \
+ container_of(dw, struct hellcreek, overflow_work)
+
+#define led_to_hellcreek(ldev, led) \
+ container_of(ldev, struct hellcreek, led)
+
+#endif /* _HELLCREEK_PTP_H_ */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index de7692b763d8..6408402a44f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -558,7 +558,7 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
- };
+ }
/* Set feedback divide ratio update signal to high */
val = mt7530_read(priv, MT7531_PLLGP_CR0);
@@ -1021,6 +1021,53 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
mutex_unlock(&priv->reg_mutex);
}
+static int
+mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mii_bus *bus = priv->bus;
+ int length;
+ u32 val;
+
+ /* When a new MTU is set, DSA always sets the CPU port's MTU to the
+ * largest MTU of the slave ports. Because the switch only has a global
+ * RX length register, only allowing the CPU port here is enough.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(priv, MT7530_GMACCR);
+ val &= ~MAX_RX_PKT_LEN_MASK;
+
+ /* RX length also includes Ethernet header, MTK tag, and FCS length */
+ length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
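+ /* For example, the default 1500 byte MTU yields a length of 1522, while
+ * a hypothetical 9000 byte MTU yields 9022 and takes the jumbo path with
+ * MAX_RX_JUMBO(9).
+ */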
+ if (length <= 1522) {
+ val |= MAX_RX_PKT_LEN_1522;
+ } else if (length <= 1536) {
+ val |= MAX_RX_PKT_LEN_1536;
+ } else if (length <= 1552) {
+ val |= MAX_RX_PKT_LEN_1552;
+ } else {
+ val &= ~MAX_RX_JUMBO_MASK;
+ val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
+ val |= MAX_RX_PKT_LEN_JUMBO;
+ }
+
+ mt7530_mii_write(priv, MT7530_GMACCR, val);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+}
+
+static int
+mt7530_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return MT7530_MAX_MTU;
+}
+
static void
mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
@@ -2519,6 +2566,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.get_sset_count = mt7530_get_sset_count,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
+ .port_change_mtu = mt7530_port_change_mtu,
+ .port_max_mtu = mt7530_port_max_mtu,
.port_stp_state_set = mt7530_stp_state_set,
.port_bridge_join = mt7530_port_bridge_join,
.port_bridge_leave = mt7530_port_bridge_leave,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 9278a8e3d04e..ee3523a7537e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -11,6 +11,9 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
+#define MTK_HDR_LEN 4
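+/* Largest supported MTU: the 15k RX limit less Ethernet header, FCS and MTK
+ * DSA tag, i.e. 15 * 1024 - 14 - 4 - 4 = 15338 bytes
+ */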
+#define MT7530_MAX_MTU (15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN)
+
enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
@@ -289,6 +292,15 @@ enum mt7530_vlan_port_attr {
#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
#define MT7531_DIS_CLR BIT(31)
+#define MT7530_GMACCR 0x30e0
+#define MAX_RX_JUMBO(x) ((x) << 2)
+#define MAX_RX_JUMBO_MASK GENMASK(5, 2)
+#define MAX_RX_PKT_LEN_MASK GENMASK(1, 0)
+#define MAX_RX_PKT_LEN_1522 0x0
+#define MAX_RX_PKT_LEN_1536 0x1
+#define MAX_RX_PKT_LEN_1552 0x2
+#define MAX_RX_PKT_LEN_JUMBO 0x3
+
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
#define MT7530_MIB_CCR 0x4fe0
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 34cca0a4b31c..e8258db8c21e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1442,7 +1442,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
{
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return 0;
return mv88e6xxx_g1_vtu_flush(chip);
@@ -1484,7 +1484,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
}
/* Set every FID bit used by the VLAN entries */
- vlan.vid = chip->info->max_vid;
+ vlan.vid = mv88e6xxx_max_vid(chip);
vlan.valid = false;
do {
@@ -1496,7 +1496,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
break;
set_bit(vlan.fid, fid_bitmap);
- } while (vlan.vid < chip->info->max_vid);
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
return 0;
}
@@ -1587,7 +1587,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
int err;
if (switchdev_trans_ph_prepare(trans))
- return chip->info->max_vid ? 0 : -EOPNOTSUPP;
+ return mv88e6xxx_max_vid(chip) ? 0 : -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
@@ -1603,7 +1603,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
/* If the requested port doesn't belong to the same bridge as the VLAN
@@ -1973,7 +1973,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
u8 member;
u16 vid;
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
@@ -2051,7 +2051,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
u16 pvid, vid;
int err = 0;
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -2157,7 +2157,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
return err;
/* Dump VLANs' Filtering Information Databases */
- vlan.vid = chip->info->max_vid;
+ vlan.vid = mv88e6xxx_max_vid(chip);
vlan.valid = false;
do {
@@ -2172,7 +2172,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
cb, data);
if (err)
return err;
- } while (vlan.vid < chip->info->max_vid);
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
return err;
}
@@ -2853,6 +2853,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+ ds->configure_vlan_while_not_filtering = true;
mv88e6xxx_reg_lock(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 81c244fc0419..7faa61b7f8f8 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -245,6 +245,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL1 = 0,
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
+ MV88E6XXX_REGION_VTU,
_MV88E6XXX_REGION_MAX,
};
@@ -672,6 +673,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
return chip->info->num_ports;
}
+static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_vid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index ade04c036fd9..21953d6d484c 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -417,6 +417,92 @@ out:
return err;
}
+/**
+ * struct mv88e6xxx_devlink_vtu_entry - Devlink VTU entry
+ * @fid: Global1/2: FID and VLAN policy.
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @op: Global1/5: FID (old chipsets).
+ * @vid: Global1/6: VID, valid, and page.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. Also happens to align the size to 16B.
+ *
+ * The VTU entry format varies between chipset generations; the
+ * descriptions above represent the superset of all possible
+ * information, and not all fields are valid on all devices. Since this is
+ * a low-level debug interface, copy all data verbatim and defer
+ * parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_vtu_entry {
+ u16 fid;
+ u16 sid;
+ u16 op;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_vtu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_vid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_vtu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ vlan.vid = mv88e6xxx_max_vid(chip);
+ vlan.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_vtu_getnext(chip, &vlan);
+ if (err)
+ break;
+
+ if (!vlan.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID,
+ &entry->fid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP,
+ &entry->op);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
const struct devlink_port_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -475,6 +561,12 @@ static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+ .name = "vtu",
+ .snapshot = mv88e6xxx_region_vtu_snapshot,
+ .destructor = kfree,
+};
+
static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
.name = "port",
.snapshot = mv88e6xxx_region_port_snapshot,
@@ -498,6 +590,10 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_atu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_VTU] = {
+ .ops = &mv88e6xxx_region_vtu_ops
+ /* calculated at runtime */
+ },
};
static void
@@ -576,9 +672,16 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
ops = mv88e6xxx_regions[i].ops;
size = mv88e6xxx_regions[i].size;
- if (i == MV88E6XXX_REGION_ATU)
+ switch (i) {
+ case MV88E6XXX_REGION_ATU:
size = mv88e6xxx_num_databases(chip) *
sizeof(struct mv88e6xxx_devlink_atu_entry);
+ break;
+ case MV88E6XXX_REGION_VTU:
+ size = mv88e6xxx_max_vid(chip) *
+ sizeof(struct mv88e6xxx_devlink_vtu_entry);
+ break;
+ }
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region))
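Because the kernel-doc above deliberately defers parsing to the consumer, a consumer-side walker over a dumped "vtu" region could look like the hedged sketch below. The 16-byte entry layout mirrors struct mv88e6xxx_devlink_vtu_entry; how the blob is obtained (e.g. via devlink region read) and how the raw words decode are chipset-specific and left as assumptions.

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct mv88e6xxx_devlink_vtu_entry above: eight u16 words,
 * 16 bytes per entry.
 */
struct vtu_entry {
	uint16_t fid, sid, op, vid, data[3], resvd;
};

/* Print the raw words of each entry; decoding the bits is left to the
 * reader, per the kernel-doc above.
 */
static void dump_vtu(const uint8_t *blob, size_t len)
{
	const struct vtu_entry *e = (const struct vtu_entry *)blob;
	size_t i, n = len / sizeof(*e);

	for (i = 0; i < n; i++, e++)
		printf("vid=0x%04x fid=0x%04x sid=0x%04x data=%04x/%04x/%04x\n",
		       e->vid, e->fid, e->sid,
		       e->data[0], e->data[1], e->data[2]);
}

int main(void)
{
	/* Illustrative entry; real data comes from the region snapshot. */
	struct vtu_entry demo = { .fid = 1, .vid = 100 };

	dump_vtu((const uint8_t *)&demo, sizeof(demo));
	return 0;
}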
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index e05abe61fa11..80a182c5b98a 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -330,6 +330,8 @@ void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
+int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 1048509a849b..66ddf67b8737 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -307,8 +307,8 @@ static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
return 0;
}
-static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
{
int err;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index f791860d495f..ada75fa15861 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -112,10 +112,32 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
ocelot_port_bridge_leave(ocelot, port, br);
}
-/* This callback needs to be present */
static int felix_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid, flags = vlan->flags;
+ int err;
+
+ /* Ocelot switches copy frames as-is to the CPU, so the flags
+ * (egress-untagged or not, pvid or not) make no difference. This
+ * behavior is already better than what DSA just tries to approximate
+ * when it installs the VLAN with the same flags on the CPU port.
+ * Just accept any configuration, and don't let ocelot deny installing
+ * multiple native VLANs on the NPI port, because the switch doesn't
+ * look at the port tag settings towards the NPI interface anyway.
+ */
+ if (port == ocelot->npi)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_prepare(ocelot, port, vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -135,9 +157,6 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
u16 vid;
int err;
- if (dsa_is_cpu_port(ds, port))
- flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
-
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
err = ocelot_vlan_add(ocelot, port, vid,
flags & BRIDGE_VLAN_INFO_PVID,
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bab3a9bb5e6f..f82ad7419508 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -124,7 +124,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
- dev->features |= NETIF_F_ALL_TSO;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index d60a86aa8aa8..9aac7119d382 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -175,7 +175,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
default:
return MAC8390_APPLE;
}
- break;
case NUBUS_DRSW_APPLE:
switch (fres->dr_hw) {
@@ -186,11 +185,9 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
default:
return MAC8390_APPLE;
}
- break;
case NUBUS_DRSW_ASANTE:
return MAC8390_ASANTE;
- break;
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
@@ -199,11 +196,9 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
- break;
case NUBUS_DRSW_FARALLON:
return MAC8390_FARALLON;
- break;
case NUBUS_DRSW_KINETICS:
switch (fres->dr_hw) {
@@ -212,7 +207,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
default:
return MAC8390_KINETICS;
}
- break;
case NUBUS_DRSW_DAYNA:
/*
@@ -224,7 +218,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
- break;
}
return MAC8390_NONE;
}
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1c97e39b478e..e9756d0ea5b8 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -710,7 +710,7 @@ static void ne_block_output(struct net_device *dev, int count,
retry:
#endif
-#ifdef NE8390_RW_BUGFIX
+#ifdef NE_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
Crynwr packet driver -- the NatSemi method doesn't work.
Actually this doesn't always work either, but if you have
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index bc6edb3f1af3..d6715008e04d 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -610,7 +610,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
/* We should already be in page 0, but to be safe... */
outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
-#ifdef NE8390_RW_BUGFIX
+#ifdef NE_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
* Crynwr packet driver -- the NatSemi method doesn't work.
* Actually this doesn't always work either, but if you have
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index bf5e0e9bd0e2..6c049864dac0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1474,7 +1474,7 @@ int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
cfg->prio_tc_map[i] = cfg->tcs * i / 8;
- cfg->is_qos = (tcs != 0 ? true : false);
+ cfg->is_qos = !!tcs;
cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
if (!cfg->is_ptp)
netdev_warn(self->ndev, "%s\n",
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5de47f6fde5a..1f5da4e4f4b2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -77,10 +77,12 @@
#define MACB_RBQPH 0x04D4
/* GEM register offsets. */
+#define GEM_NCR 0x0000 /* Network Control */
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
#define GEM_JML 0x0048 /* Jumbo Max Length */
+#define GEM_HS_MAC_CONFIG 0x0050 /* GEM high speed config */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
#define GEM_SA1B 0x0088 /* Specific1 Bottom */
@@ -166,6 +168,9 @@
#define GEM_DCFG7 0x0298 /* Design Config 7 */
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
+#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
+#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
@@ -272,11 +277,19 @@
#define MACB_IRXFCS_OFFSET 19
#define MACB_IRXFCS_SIZE 1
+/* GEM specific NCR bitfields. */
+#define GEM_ENABLE_HS_MAC_OFFSET 31
+#define GEM_ENABLE_HS_MAC_SIZE 1
+
/* GEM specific NCFGR bitfields. */
+#define GEM_FD_OFFSET 1 /* Full duplex */
+#define GEM_FD_SIZE 1
#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
#define GEM_GBE_SIZE 1
#define GEM_PCSSEL_OFFSET 11
#define GEM_PCSSEL_SIZE 1
+#define GEM_PAE_OFFSET 13 /* Pause enable */
+#define GEM_PAE_SIZE 1
#define GEM_CLK_OFFSET 18 /* MDC clock division */
#define GEM_CLK_SIZE 3
#define GEM_DBW_OFFSET 21 /* Data bus width */
@@ -461,11 +474,17 @@
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
+/* Bitfield in HS_MAC_CONFIG */
+#define GEM_HS_MAC_SPEED_OFFSET 0
+#define GEM_HS_MAC_SPEED_SIZE 3
+
/* Bitfields in DCFG1. */
#define GEM_IRQCOR_OFFSET 23
#define GEM_IRQCOR_SIZE 1
#define GEM_DBWDEF_OFFSET 25
#define GEM_DBWDEF_SIZE 3
+#define GEM_NO_PCS_OFFSET 0
+#define GEM_NO_PCS_SIZE 1
/* Bitfields in DCFG2. */
#define GEM_RX_PKT_BUFF_OFFSET 20
@@ -500,6 +519,28 @@
#define GEM_RXBD_RDBUFF_OFFSET 8
#define GEM_RXBD_RDBUFF_SIZE 4
+/* Bitfields in DCFG12. */
+#define GEM_HIGH_SPEED_OFFSET 26
+#define GEM_HIGH_SPEED_SIZE 1
+
+/* Bitfields in USX_CONTROL. */
+#define GEM_USX_CTRL_SPEED_OFFSET 14
+#define GEM_USX_CTRL_SPEED_SIZE 3
+#define GEM_SERDES_RATE_OFFSET 12
+#define GEM_SERDES_RATE_SIZE 2
+#define GEM_RX_SCR_BYPASS_OFFSET 9
+#define GEM_RX_SCR_BYPASS_SIZE 1
+#define GEM_TX_SCR_BYPASS_OFFSET 8
+#define GEM_TX_SCR_BYPASS_SIZE 1
+#define GEM_TX_EN_OFFSET 1
+#define GEM_TX_EN_SIZE 1
+#define GEM_SIGNAL_OK_OFFSET 0
+#define GEM_SIGNAL_OK_SIZE 1
+
+/* Bitfields in USX_STATUS. */
+#define GEM_USX_BLOCK_LOCK_OFFSET 0
+#define GEM_USX_BLOCK_LOCK_SIZE 1
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCRL_OFFSET 24
@@ -663,6 +704,8 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_PCS 0x01000000
+#define MACB_CAPS_HIGH_SPEED 0x02000000
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -1201,6 +1244,7 @@ struct macb {
struct mii_bus *mii_bus;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
u32 caps;
unsigned int dma_burst_length;
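The new OFFSET/SIZE pairs plug into the driver's existing GEM_BFINS/GEM_BFEXT field macros. As a hedged illustration of that convention, the standalone sketch below re-derives equivalent macros (the driver's real ones are defined elsewhere in macb.h) and packs the USX_CTRL_SPEED field the same way macb_usx_pcs_link_up() does later in this series.

#include <stdint.h>
#include <stdio.h>

/* Field definition copied from the hunk above. */
#define GEM_USX_CTRL_SPEED_OFFSET	14
#define GEM_USX_CTRL_SPEED_SIZE		3

/* Local stand-ins for the driver's field macros (same shape, sketch
 * only): build, insert into and extract from a register word.
 */
#define GEM_BF(name, v) \
	(((v) & ((1u << GEM_##name##_SIZE) - 1)) << GEM_##name##_OFFSET)
#define GEM_BFINS(name, v, old) \
	(((old) & ~GEM_BF(name, ~0u)) | GEM_BF(name, v))
#define GEM_BFEXT(name, reg) \
	(((reg) >> GEM_##name##_OFFSET) & ((1u << GEM_##name##_SIZE) - 1))

int main(void)
{
	uint32_t reg = 0;

	/* 4 is HS_SPEED_10000M in the macb_main.c hunk below. */
	reg = GEM_BFINS(USX_CTRL_SPEED, 4, reg);
	printf("reg=0x%08x speed=%u\n", reg,
	       GEM_BFEXT(USX_CTRL_SPEED, reg));	/* reg=0x00010000 speed=4 */
	return 0;
}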
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 286f0341bdf8..7b1d195787dc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -84,6 +84,9 @@ struct sifive_fu540_macb_mgmt {
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
+#define HS_SPEED_10000M 4
+#define MACB_SERDES_RATE_10G 1
+
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
@@ -513,6 +516,7 @@ static void macb_validate(struct phylink_config *config,
state->interface != PHY_INTERFACE_MODE_RMII &&
state->interface != PHY_INTERFACE_MODE_GMII &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_10GBASER &&
!phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
return;
@@ -525,10 +529,31 @@ static void macb_validate(struct phylink_config *config,
return;
}
+ if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
+ !(bp->caps & MACB_CAPS_HIGH_SPEED &&
+ bp->caps & MACB_CAPS_PCS)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Asym_Pause);
+ if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
+ (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_10GBASER)) {
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseT_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ goto out;
+ }
+
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
@@ -545,23 +570,90 @@ static void macb_validate(struct phylink_config *config,
if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
phylink_set(mask, 1000baseT_Half);
}
-
+out:
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void macb_mac_pcs_get_state(struct phylink_config *config,
+static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ u32 config;
+
+ config = gem_readl(bp, USX_CONTROL);
+ config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
+ config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
+ config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
+ config |= GEM_BIT(TX_EN);
+ gem_writel(bp, USX_CONTROL, config);
+}
+
+static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
+ struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = gem_readl(bp, USX_STATUS);
+ state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
+ val = gem_readl(bp, NCFGR);
+ if (val & GEM_BIT(PAE))
+ state->pause = MLO_PAUSE_RX;
+}
+
+static int macb_usx_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+
+ gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
+ GEM_BIT(SIGNAL_OK));
+
+ return 0;
+}
+
+static void macb_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
state->link = 0;
}
-static void macb_mac_an_restart(struct phylink_config *config)
+static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
/* Not supported */
}
+static int macb_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
+ .pcs_get_state = macb_usx_pcs_get_state,
+ .pcs_config = macb_usx_pcs_config,
+ .pcs_link_up = macb_usx_pcs_link_up,
+};
+
+static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
+ .pcs_get_state = macb_pcs_get_state,
+ .pcs_an_restart = macb_pcs_an_restart,
+ .pcs_config = macb_pcs_config,
+};
+
static void macb_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -569,25 +661,35 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
struct macb *bp = netdev_priv(ndev);
unsigned long flags;
u32 old_ctrl, ctrl;
+ u32 old_ncr, ncr;
spin_lock_irqsave(&bp->lock, flags);
old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
+ old_ncr = ncr = macb_or_gem_readl(bp, NCR);
if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
if (state->interface == PHY_INTERFACE_MODE_RMII)
ctrl |= MACB_BIT(RM9200_RMII);
} else if (macb_is_gem(bp)) {
ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
+ ncr &= ~GEM_BIT(ENABLE_HS_MAC);
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
+ } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
+ ctrl |= GEM_BIT(PCSSEL);
+ ncr |= GEM_BIT(ENABLE_HS_MAC);
+ }
}
/* Apply the new configuration, if any */
if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
+ if (old_ncr ^ ncr)
+ macb_or_gem_writel(bp, NCR, ncr);
+
spin_unlock_irqrestore(&bp->lock, flags);
}
@@ -664,6 +766,10 @@ static void macb_mac_link_up(struct phylink_config *config,
macb_or_gem_writel(bp, NCFGR, ctrl);
+ if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
+ gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
+ gem_readl(bp, HS_MAC_CONFIG)));
+
spin_unlock_irqrestore(&bp->lock, flags);
/* Enable Rx and Tx */
@@ -672,10 +778,28 @@ static void macb_mac_link_up(struct phylink_config *config,
netif_tx_wake_all_queues(ndev);
}
+static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_10GBASER)
+ bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
+ else if (interface == PHY_INTERFACE_MODE_SGMII)
+ bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
+ else
+ bp->phylink_pcs.ops = NULL;
+
+ if (bp->phylink_pcs.ops)
+ phylink_set_pcs(bp->phylink, &bp->phylink_pcs);
+
+ return 0;
+}
+
static const struct phylink_mac_ops macb_phylink_ops = {
.validate = macb_validate,
- .mac_pcs_get_state = macb_mac_pcs_get_state,
- .mac_an_restart = macb_mac_an_restart,
+ .mac_prepare = macb_mac_prepare,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -3524,6 +3648,11 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG1);
if (GEM_BFEXT(IRQCOR, dcfg) == 0)
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+ if (GEM_BFEXT(NO_PCS, dcfg) == 0)
+ bp->caps |= MACB_CAPS_PCS;
+ dcfg = gem_readl(bp, DCFG12);
+ if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
+ bp->caps |= MACB_CAPS_HIGH_SPEED;
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
@@ -3582,19 +3711,13 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
}
if (IS_ERR_OR_NULL(*pclk)) {
- err = PTR_ERR(*pclk);
- if (!err)
- err = -ENODEV;
-
+ err = IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV;
dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
return err;
}
if (IS_ERR_OR_NULL(*hclk)) {
- err = PTR_ERR(*hclk);
- if (!err)
- err = -ENODEV;
-
+ err = IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV;
dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
return err;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 2a6d1cadac9e..30254e4cf70f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -27,7 +27,6 @@
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 17410fe86626..7d49fd4edc9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2671,7 +2671,7 @@ do { \
seq_printf(seq, "%-12s", s); \
for (i = 0; i < n; ++i) \
seq_printf(seq, " %16" fmt_spec, v); \
- seq_putc(seq, '\n'); \
+ seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 2d3dfdd2a716..e7b78b68eaac 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -235,6 +235,7 @@ struct chtls_dev {
struct list_head na_node;
unsigned int send_page_order;
int max_host_sndbuf;
+ u32 round_robin_cnt;
struct key_map kmap;
unsigned int cdev_state;
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 96d561653496..63aacc184f68 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1217,8 +1217,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = csk->snd_win;
csk->ulp_mode = ULP_MODE_TLS;
step = cdev->lldi->nrxq / cdev->lldi->nchan;
- csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
rxq_idx = port_id * step;
+ rxq_idx += cdev->round_robin_cnt++ % step;
+ csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx];
csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
port_id * step;
csk->sndbuf = newsk->sk_sndbuf;
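To see how the new round_robin_cnt spreads incoming connections, this small sketch replays the index arithmetic with illustrative values; nrxq, nchan and port_id are assumptions, not hardware-derived.

#include <stdio.h>

int main(void)
{
	unsigned int nrxq = 16, nchan = 2;	/* illustrative */
	unsigned int step = nrxq / nchan;	/* queues per channel */
	unsigned int round_robin_cnt = 0;
	unsigned int port_id = 1;
	int conn;

	for (conn = 0; conn < 4; conn++) {
		unsigned int rxq_idx = port_id * step +
				       round_robin_cnt++ % step;
		printf("conn %d -> rxq_idx %u\n", conn, rxq_idx);
	}
	return 0;	/* prints 8, 9, 10, 11 */
}

Before this change every connection on a port pinned to rxq_ids[port_id * step]; now they rotate across the port's step queues.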
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 76247103e566..7af86b6d4150 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -5,7 +5,7 @@
config DM9000
tristate "DM9000 support"
- depends on ARM || MIPS || COLDFIRE || NIOS2
+ depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5c6c8c5ec747..3fdc70dab5c1 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -232,32 +232,29 @@ static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
int i;
- int tmp;
for (i = 0; i < count; i++)
- tmp = readb(reg);
+ readb(reg);
}
static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
int i;
- int tmp;
count = (count + 1) >> 1;
for (i = 0; i < count; i++)
- tmp = readw(reg);
+ readw(reg);
}
static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
int i;
- int tmp;
count = (count + 3) >> 2;
for (i = 0; i < count; i++)
- tmp = readl(reg);
+ readl(reg);
}
/*
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d9f6c19940ef..c3cbe55205a7 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2175,11 +2175,21 @@ out:
static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
+static void de_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+ dev_close(dev);
+ rtnl_unlock();
+}
+
static struct pci_driver de_driver = {
.name = DRV_NAME,
.id_table = de_pci_tbl,
.probe = de_init_one,
.remove = de_remove_one,
+ .shutdown = de_shutdown,
.driver.pm = &de_pm_ops,
};
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index e7b0d7de40fd..c1dcd6ca1457 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1293,7 +1293,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static unsigned char last_phys_addr[ETH_ALEN] = {
0x00, 'L', 'i', 'n', 'u', 'x'
};
+#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
static int last_irq;
+#endif
int i, irq;
unsigned short sum;
unsigned char *ee_data;
@@ -1617,7 +1619,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < 6; i++)
last_phys_addr[i] = dev->dev_addr[i];
+#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
last_irq = irq;
+#endif
/* The lower four bits are the media type. */
if (board_idx >= 0 && board_idx < MAX_UNITS) {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 80fb1f537bb3..88bfe2107938 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1044,10 +1044,39 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
schedule_work(&priv->reset_task);
}
-static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
+static int ftgmac100_mii_probe(struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
phydev = phy_find_first(priv->mii_bus);
if (!phydev) {
@@ -1056,7 +1085,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
}
phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, intf);
+ &ftgmac100_adjust_link, phy_intf);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
@@ -1601,8 +1630,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
- phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *mdio_np;
int i, err = 0;
u32 reg;
@@ -1623,39 +1652,6 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
}
- /* Get PHY mode from device-tree */
- if (np) {
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed &&
- phy_intf != PHY_INTERFACE_MODE_RMII &&
- phy_intf != PHY_INTERFACE_MODE_RGMII &&
- phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
- phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
- phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
- }
-
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1667,35 +1663,38 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
for (i = 0; i < PHY_MAX_ADDR; i++)
priv->mii_bus->irq[i] = PHY_POLL;
- err = mdiobus_register(priv->mii_bus);
+ mdio_np = of_get_child_by_name(np, "mdio");
+
+ err = of_mdiobus_register(priv->mii_bus, mdio_np);
if (err) {
dev_err(priv->dev, "Cannot register MDIO bus!\n");
goto err_register_mdiobus;
}
- err = ftgmac100_mii_probe(priv, phy_intf);
- if (err) {
- dev_err(priv->dev, "MII Probe failed!\n");
- goto err_mii_probe;
- }
+ of_node_put(mdio_np);
return 0;
-err_mii_probe:
- mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
mdiobus_free(priv->mii_bus);
return err;
}
+static void ftgmac100_phy_disconnect(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return;
+
+ phy_disconnect(netdev->phydev);
+}
+
static void ftgmac100_destroy_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- if (!netdev->phydev)
+ if (!priv->mii_bus)
return;
- phy_disconnect(netdev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
}
@@ -1830,22 +1829,33 @@ static int ftgmac100_probe(struct platform_device *pdev)
if (np && of_get_property(np, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
dev_err(&pdev->dev, "NCSI stack not enabled\n");
- goto err_ncsi_dev;
+ goto err_phy_connect;
}
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
- goto err_ncsi_dev;
+ goto err_phy_connect;
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
+ /* Support "mdio"/"phy" child nodes for ast2400/2500 with
+ * an embedded MDIO controller. Automatically scan the DTS for
+ * available PHYs and register them.
+ */
+ if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ err = ftgmac100_setup_mdio(netdev);
+ if (err)
+ goto err_setup_mdio;
+ }
+
phy = of_phy_get_and_connect(priv->netdev, np,
&ftgmac100_adjust_link);
if (!phy) {
dev_err(&pdev->dev, "Failed to connect to phy\n");
- goto err_setup_mdio;
+ goto err_phy_connect;
}
/* Indicate that we support PAUSE frames (see comment in
@@ -1865,12 +1875,19 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_mdio(netdev);
if (err)
goto err_setup_mdio;
+
+ err = ftgmac100_mii_probe(netdev);
+ if (err) {
+ dev_err(priv->dev, "MII probe failed!\n");
+ goto err_ncsi_dev;
+ }
+
}
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
- goto err_ncsi_dev;
+ goto err_phy_connect;
}
/* Default ring sizes */
@@ -1906,6 +1923,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
err_register_netdev:
clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
+err_phy_connect:
+ ftgmac100_phy_disconnect(netdev);
err_ncsi_dev:
if (priv->ndev)
ncsi_unregister_dev(priv->ndev);
@@ -1940,6 +1959,7 @@ static int ftgmac100_remove(struct platform_device *pdev)
*/
cancel_work_sync(&priv->reset_task);
+ ftgmac100_phy_disconnect(netdev);
ftgmac100_destroy_mdio(netdev);
iounmap(priv->base);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index d9c285948fc2..8867693e0fb3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2305,9 +2305,9 @@ static void dpaa_tx_conf(struct net_device *net_dev,
}
static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
- struct qman_portal *portal)
+ struct qman_portal *portal, bool sched_napi)
{
- if (unlikely(in_irq() || !in_serving_softirq())) {
+ if (sched_napi) {
/* Disable QMan IRQ and invoke NAPI */
qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
@@ -2321,7 +2321,8 @@ static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
struct dpaa_percpu_priv *percpu_priv;
@@ -2337,7 +2338,7 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_eth_refill_bpools(priv);
@@ -2348,7 +2349,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct rtnl_link_stats64 *percpu_stats;
@@ -2380,7 +2382,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
- if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
return qman_cb_dqrr_stop;
/* Make sure we didn't run out of buffers */
@@ -2465,7 +2467,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
@@ -2476,7 +2479,7 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2486,7 +2489,8 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
@@ -2500,7 +2504,7 @@ static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2546,7 +2550,7 @@ static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->np.down = 0;
+ percpu_priv->np.down = false;
napi_enable(&percpu_priv->np.napi);
}
}
@@ -2559,7 +2563,7 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->np.down = 1;
+ percpu_priv->np.down = true;
napi_disable(&percpu_priv->np.napi);
}
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index fc2075ea57fe..c78d12229730 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -50,40 +50,6 @@ drop_packet_err:
return NETDEV_TX_OK;
}
-static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
-{
- int l3_start, l3_hsize;
- u16 l3_flags, l4_flags;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return false;
-
- switch (skb->csum_offset) {
- case offsetof(struct tcphdr, check):
- l4_flags = ENETC_TXBD_L4_TCP;
- break;
- case offsetof(struct udphdr, check):
- l4_flags = ENETC_TXBD_L4_UDP;
- break;
- default:
- skb_checksum_help(skb);
- return false;
- }
-
- l3_start = skb_network_offset(skb);
- l3_hsize = skb_network_header_len(skb);
-
- l3_flags = 0;
- if (skb->protocol == htons(ETH_P_IPV6))
- l3_flags = ENETC_TXBD_L3_IPV6;
-
- /* write BD fields */
- txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
- txbd->l4_csoff = l4_flags;
-
- return true;
-}
-
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
@@ -149,22 +115,16 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
if (do_vlan || do_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
- if (enetc_tx_csum(skb, &temp_bd))
- flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
- else if (tx_ring->tsd_enable)
+ if (tx_ring->tsd_enable)
flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
- if (flags & ENETC_TXBD_FLAGS_TSE) {
- u32 temp;
-
- temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
- | (flags << ENETC_TXBD_FLAGS_OFFSET);
- temp_bd.txstart = cpu_to_le32(temp);
- }
+ if (flags & ENETC_TXBD_FLAGS_TSE)
+ temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
+ flags);
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
@@ -1925,8 +1885,7 @@ static void enetc_kfree_si(struct enetc_si *si)
static void enetc_detect_errata(struct enetc_si *si)
{
if (si->pdev->revision == ENETC_REV1)
- si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
- ENETC_ERR_UCMCSWP;
+ si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dd0fb0c066d7..8532d23b54f5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -147,9 +147,8 @@ struct enetc_msg_swbd {
#define ENETC_REV1 0x1
enum enetc_errata {
- ENETC_ERR_TXCSUM = BIT(0),
- ENETC_ERR_VLAN_ISOL = BIT(1),
- ENETC_ERR_UCMCSWP = BIT(2),
+ ENETC_ERR_VLAN_ISOL = BIT(0),
+ ENETC_ERR_UCMCSWP = BIT(1),
};
#define ENETC_SI_F_QBV BIT(0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index eb6bbf1113c7..861bd04638e0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -477,8 +477,7 @@ union enetc_tx_bd {
__le16 frm_len;
union {
struct {
- __le16 l3_csoff;
- u8 l4_csoff;
+ u8 reserved[3];
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -501,41 +500,37 @@ union enetc_tx_bd {
} wb; /* writeback descriptor */
};
-#define ENETC_TXBD_FLAGS_L4CS BIT(0)
-#define ENETC_TXBD_FLAGS_TSE BIT(1)
-#define ENETC_TXBD_FLAGS_W BIT(2)
-#define ENETC_TXBD_FLAGS_CSUM BIT(3)
-#define ENETC_TXBD_FLAGS_TXSTART BIT(4)
-#define ENETC_TXBD_FLAGS_EX BIT(6)
-#define ENETC_TXBD_FLAGS_F BIT(7)
+enum enetc_txbd_flags {
+ ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_W = BIT(2),
+ ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_TXSTART = BIT(4),
+ ENETC_TXBD_FLAGS_EX = BIT(6),
+ ENETC_TXBD_FLAGS_F = BIT(7)
+};
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+
+static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
+{
+ u32 temp;
+
+ temp = (tx_start >> 5 & ENETC_TXBD_TXSTART_MASK) |
+ (flags << ENETC_TXBD_FLAGS_OFFSET);
+
+ return cpu_to_le32(temp);
+}
+
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
}
-/* L3 csum flags */
-#define ENETC_TXBD_L3_IPCS BIT(7)
-#define ENETC_TXBD_L3_IPV6 BIT(15)
-
-#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0)
-#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8)
-
/* Extension flags */
#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
-static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
-{
- return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
- (start & ENETC_TXBD_L3_START_MASK));
-}
-
-/* L4 csum flags */
-#define ENETC_TXBD_L4_UDP BIT(5)
-#define ENETC_TXBD_L4_TCP BIT(6)
-
union enetc_rx_bd {
struct {
__le64 addr;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 419306342ac5..ecdc2af8c292 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -714,22 +714,16 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
- if (si->errata & ENETC_ERR_TXCSUM) {
- ndev->hw_features &= ~NETIF_F_HW_CSUM;
- ndev->features &= ~NETIF_F_HW_CSUM;
- }
-
ndev->priv_flags |= IFF_UNICAST_FLT;
if (si->hw_features & ENETC_SI_F_QBV)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 7b5c82c7e4e5..39c1a09e69a9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -120,22 +120,16 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
- if (si->errata & ENETC_ERR_TXCSUM) {
- ndev->hw_features &= ~NETIF_F_HW_CSUM;
- ndev->features &= ~NETIF_F_HW_CSUM;
- }
-
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 912c51e327d6..f9d4d234a2af 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -278,6 +278,7 @@ struct hnae3_dev_specs {
u16 rss_ind_tbl_size;
u16 rss_key_size;
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
+ u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index dc9a85745e62..a5ebca888ee0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -349,6 +349,7 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a362516a3185..999a2aaad847 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -211,8 +211,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
*/
- if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
- !tqp_vector->rx_group.coal.gl_adapt_enable)
+ if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
+ !tqp_vector->rx_group.coal.adapt_enable)
/* According to the hardware, the range of rl_reg is
* 0-59 and the unit is 4.
*/
@@ -224,48 +224,99 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
- u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+ u32 new_val;
- writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+ if (tqp_vector->rx_group.coal.unit_1us)
+ new_val = gl_value | HNS3_INT_GL_1US;
+ else
+ new_val = hns3_gl_usec_to_reg(gl_value);
+
+ writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
- u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+ u32 new_val;
+
+ if (tqp_vector->tx_group.coal.unit_1us)
+ new_val = gl_value | HNS3_INT_GL_1US;
+ else
+ new_val = hns3_gl_usec_to_reg(gl_value);
+
+ writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value)
+{
+ writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
+}
- writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value)
+{
+ writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
- struct hns3_nic_priv *priv)
+static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
*
 * Default: enable self-adaptive interrupt coalescing and GL
*/
- tqp_vector->tx_group.coal.gl_adapt_enable = 1;
- tqp_vector->rx_group.coal.gl_adapt_enable = 1;
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
- tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
- tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ /* For device versions V3 and above, GL can be configured in a
+ * 1us unit, so use the 1us unit.
+ */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ tx_coal->unit_1us = 1;
+ rx_coal->unit_1us = 1;
+ }
- tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
- tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->ql_enable = 1;
+ rx_coal->ql_enable = 1;
+ tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
}
-static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
- struct hns3_nic_priv *priv)
+static void
+hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
struct hnae3_handle *h = priv->ae_handle;
- hns3_set_vector_coalesce_tx_gl(tqp_vector,
- tqp_vector->tx_group.coal.int_gl);
- hns3_set_vector_coalesce_rx_gl(tqp_vector,
- tqp_vector->rx_group.coal.int_gl);
+ hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
+ if (tx_coal->ql_enable)
+ hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);
+
+ if (rx_coal->ql_enable)
+ hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
@@ -3333,14 +3384,14 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
return;
- if (rx_group->coal.gl_adapt_enable) {
+ if (rx_group->coal.adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group);
if (rx_update)
hns3_set_vector_coalesce_rx_gl(tqp_vector,
rx_group->coal.int_gl);
}
- if (tx_group->coal.gl_adapt_enable) {
+ if (tx_group->coal.adapt_enable) {
tx_update = hns3_get_new_int_gl(tx_group);
if (tx_update)
hns3_set_vector_coalesce_tx_gl(tqp_vector,
@@ -3536,7 +3587,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_vector_gl_rl_init_hw(tqp_vector, priv);
+ hns3_vector_coalesce_init_hw(tqp_vector, priv);
tqp_vector->num_tqps = 0;
}
@@ -3632,7 +3683,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
tqp_vector->idx = i;
tqp_vector->mask_addr = vector[i].io_addr;
tqp_vector->vector_irq = vector[i].vector;
- hns3_vector_gl_rl_init(tqp_vector, priv);
+ hns3_vector_coalesce_init(tqp_vector, priv);
}
out:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1c81dea0da1e..8d3365231bfd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -181,6 +181,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
+#define HNS3_VECTOR_RX_QL_OFFSET 0xf00
#define HNS3_RING_EN_B 0
@@ -418,18 +420,25 @@ enum hns3_flow_level_range {
HNS3_FLOW_ULTRA = 3,
};
-#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
#define HNS3_INT_GL_18K 0x0036
#define HNS3_INT_GL_8K 0x007C
+#define HNS3_INT_GL_1US BIT(31)
+
#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40
+#define HNS3_INT_QL_DEFAULT_CFG 0x20
+
struct hns3_enet_coalesce {
u16 int_gl;
- u8 gl_adapt_enable;
+ u16 int_ql;
+ u16 int_ql_max;
+ u8 adapt_enable:1;
+ u8 ql_enable:1;
+ u8 unit_1us:1;
enum hns3_flow_level_range flow_level;
};
@@ -595,6 +604,10 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
+void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value);
+void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
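A hedged sketch of the GL register-value selection introduced above: legacy parts program the GL field in a coarser granularity via hns3_gl_usec_to_reg() (modelled here as a divide-by-two, an assumption of this sketch), while V3+ parts set HNS3_INT_GL_1US and program microseconds directly.

#include <stdint.h>
#include <stdio.h>

#define HNS3_INT_GL_1US (1u << 31)	/* BIT(31), from the hunk above */

/* Assumed model of the driver's hns3_gl_usec_to_reg() conversion. */
static uint32_t gl_usec_to_reg(uint32_t usec)
{
	return usec >> 1;
}

/* Mirrors the branch in hns3_set_vector_coalesce_rx_gl() above. */
static uint32_t gl_reg_val(uint32_t usec, int unit_1us)
{
	return unit_1us ? (usec | HNS3_INT_GL_1US) : gl_usec_to_reg(usec);
}

int main(void)
{
	printf("20us legacy: 0x%x\n", gl_reg_val(20, 0));	/* 0xa */
	printf("20us 1us-unit: 0x%x\n", gl_reg_val(20, 1));	/* 0x80000014 */
	return 0;
}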
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6b07b2771172..c30d5d3786c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1105,9 +1105,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
rx_vector = priv->ring[queue_num + queue].tqp_vector;
cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.gl_adapt_enable;
+ tx_vector->tx_group.coal.adapt_enable;
cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.gl_adapt_enable;
+ rx_vector->rx_group.coal.adapt_enable;
cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
@@ -1115,6 +1115,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
+ cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+
return 0;
}
@@ -1127,22 +1130,30 @@ static int hns3_get_coalesce(struct net_device *netdev,
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
u32 rx_gl, tx_gl;
- if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
netdev_err(netdev,
- "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
- HNS3_INT_GL_MAX);
+ "invalid rx-usecs value, rx-usecs range is 0-%u\n",
+ ae_dev->dev_specs.max_int_gl);
return -EINVAL;
}
- if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
netdev_err(netdev,
- "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
- HNS3_INT_GL_MAX);
+ "invalid tx-usecs value, tx-usecs range is 0-%u\n",
+ ae_dev->dev_specs.max_int_gl);
return -EINVAL;
}
+ /* For device version V3 and above, GL uses a 1us unit,
+ * so rounding down is not needed.
+ */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev,
@@ -1188,6 +1199,29 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
return 0;
}
+static int hns3_check_ql_coalesce_param(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
+ !ae_dev->dev_specs.int_ql_max) {
+ netdev_err(netdev, "coalesced frames are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max ||
+ cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) {
+ netdev_err(netdev,
+ "invalid coalesced_frames value, range is 0-%u\n",
+ ae_dev->dev_specs.int_ql_max);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
static int hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1207,6 +1241,10 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
+ ret = hns3_check_ql_coalesce_param(netdev, cmd);
+ if (ret)
+ return ret;
+
if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev,
@@ -1230,14 +1268,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
tx_vector = priv->ring[queue].tqp_vector;
rx_vector = priv->ring[queue_num + queue].tqp_vector;
- tx_vector->tx_group.coal.gl_adapt_enable =
+ tx_vector->tx_group.coal.adapt_enable =
cmd->use_adaptive_tx_coalesce;
- rx_vector->rx_group.coal.gl_adapt_enable =
+ rx_vector->rx_group.coal.adapt_enable =
cmd->use_adaptive_rx_coalesce;
tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;
+ tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames;
+ rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames;
+
hns3_set_vector_coalesce_tx_gl(tx_vector,
tx_vector->tx_group.coal.int_gl);
hns3_set_vector_coalesce_rx_gl(rx_vector,
@@ -1245,6 +1286,13 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+
+ if (tx_vector->tx_group.coal.ql_enable)
+ hns3_set_vector_coalesce_tx_ql(tx_vector,
+ tx_vector->tx_group.coal.int_ql);
+ if (rx_vector->rx_group.coal.ql_enable)
+ hns3_set_vector_coalesce_rx_ql(rx_vector,
+ rx_vector->rx_group.coal.int_ql);
}
static int hns3_set_coalesce(struct net_device *netdev,
@@ -1471,7 +1519,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
- ETHTOOL_COALESCE_TX_USECS_HIGH)
+ ETHTOOL_COALESCE_TX_USECS_HIGH | \
+ ETHTOOL_COALESCE_MAX_FRAMES)
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 096e26a2e16b..5b7967c309b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1103,6 +1103,14 @@ struct hclge_dev_specs_0_cmd {
__le32 max_tm_rate;
};
+#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
+
+struct hclge_dev_specs_1_cmd {
+ __le32 rsv0;
+ __le16 max_int_gl;
+ u8 rsv1[18];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 3606240025a8..f990f6915226 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -4,7 +4,6 @@
#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
-#include "hclge_dcb.h"
#include "hnae3.h"
#define BW_PERCENT 100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 1f026408ad38..710200119fe8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1366,6 +1366,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1373,14 +1374,18 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_dev_specs_0_cmd *req0;
+ struct hclge_dev_specs_1_cmd *req1;
req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1395,6 +1400,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 9460c128c095..f94f5d443ebc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -285,6 +285,14 @@ struct hclgevf_dev_specs_0_cmd {
u8 rsv1[5];
};
+#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
+
+struct hclgevf_dev_specs_1_cmd {
+ __le32 rsv0;
+ __le16 max_int_gl;
+ u8 rsv1[18];
+};
+
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index c8e3fdd5999c..71007e74e9d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2991,6 +2991,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
@@ -2998,13 +2999,17 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0;
+ struct hclgevf_dev_specs_1_cmd *req1;
req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
@@ -3017,6 +3022,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 4a9041ee1b39..5d559117f78c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1834,8 +1834,13 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
+ struct xdp_frame_bulk bq;
int i;
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* needed for xdp_return_frame_bulk */
+
for (i = 0; i < num; i++) {
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
@@ -1857,9 +1862,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
- xdp_return_frame(buf->xdpf);
+ xdp_return_frame_bulk(buf->xdpf, &bq);
}
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
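This mvneta change and the mvpp2 change below apply the same bulking pattern: initialise an xdp_frame_bulk on the stack, feed completed frames into it inside an RCU read-side section, and flush once at the end so pages are recycled in batches. A self-contained sketch of the pattern; the function and its arguments are illustrative, only the xdp_* calls come from net/xdp.h:

#include <net/xdp.h>

static void free_tx_frames_bulked(struct xdp_frame **frames, int num)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();	/* xdp_return_frame_bulk() resolves the
				 * frame's memory model under RCU
				 */
	for (i = 0; i < num; i++)
		xdp_return_frame_bulk(frames[i], &bq);

	xdp_flush_frame_bulk(&bq);	/* release anything still queued */
	rcu_read_unlock();
}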
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index f6616c8933ca..3069e192d773 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2440,8 +2440,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
+ struct xdp_frame_bulk bq;
int i;
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* needed for xdp_return_frame_bulk */
+
for (i = 0; i < num; i++) {
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
@@ -2454,10 +2459,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
dev_kfree_skb_any(tx_buf->skb);
else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
tx_buf->type == MVPP2_TYPE_XDP_NDO)
- xdp_return_frame(tx_buf->xdpf);
+ xdp_return_frame_bulk(tx_buf->xdpf, &bq);
mvpp2_txq_inc_get(txq_pcpu);
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 2f7a861d0c7b..ffc681b67f1c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -9,4 +9,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8f17e26dca53..7d0f96290943 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -145,6 +145,16 @@ int cgx_get_cgxid(void *cgxd)
return cgx->cgx_id;
}
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
+
+ return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+}
+
/* Ensure the required lock for the event queue (where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event (with
* the latest link status) can reach the destination before this function returns
@@ -814,8 +824,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
dev_dbg(dev, "Firmware command interface version = %d.%d\n",
major_ver, minor_ver);
- if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
- minor_ver != CGX_FIRMWARE_MINOR_VER)
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER)
return -EIO;
else
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 27ca3291682b..bcfc3e5f66bb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -27,6 +27,10 @@
/* Registers */
#define CGXX_CMRX_CFG 0x00
+#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59)
+#define CMR_P2X_SEL_SHIFT 59ULL
+#define CMR_P2X_SEL_NIX0 1ULL
+#define CMR_P2X_SEL_NIX1 2ULL
#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
@@ -142,5 +146,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
#endif /* CGX_H */
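The new P2X field reports which NIX block an LMAC is wired to. A hedged sketch of how a caller could turn that into a block address; the helper name is hypothetical, though the AF driver does the equivalent when mapping CGX ports to NIX0/NIX1:

/* Hypothetical helper, illustrative only. */
static int lmac_nix_blkaddr(int cgx_id, int lmac_id)
{
	u8 p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);

	/* CMR_P2X_SEL_NIX0/CMR_P2X_SEL_NIX1 are 1 and 2 respectively */
	return (p2x == CMR_P2X_SEL_NIX1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
}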
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index f48eb66ed021..17f6f42f4453 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -162,6 +162,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use the RX action set in the default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP (0x0ull)
@@ -174,8 +176,12 @@ enum nix_scheduler {
#define NPC_MCAM_KEY_X2 1
#define NPC_MCAM_KEY_X4 2
-#define NIX_INTF_RX 0
-#define NIX_INTF_TX 1
+#define NIX_INTFX_RX(a) (0x0ull | (a) << 1)
+#define NIX_INTFX_TX(a) (0x1ull | (a) << 1)
+
+/* Default interfaces are NIX0_RX and NIX0_TX */
+#define NIX_INTF_RX NIX_INTFX_RX(0)
+#define NIX_INTF_TX NIX_INTFX_TX(0)
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
@@ -206,6 +212,8 @@ enum ndc_idx_e {
NIX0_RX = 0x0,
NIX0_TX = 0x1,
NPA0_U = 0x2,
+ NIX1_RX = 0x4,
+ NIX1_TX = 0x5,
};
enum ndc_ctype_e {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 263a21129416..cb4e3d86b58b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -86,7 +86,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0001)
+#define OTX2_MBOX_VERSION (0x0007)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -188,10 +188,19 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
@@ -200,7 +209,8 @@ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
@@ -216,7 +226,6 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
@@ -271,6 +280,17 @@ struct ready_msg_rsp {
* or to detach part of a certain resource type.
* Rest of the fields specify how many of what type to
* be attached.
+ * To request LFs from two blocks of the same type, this mailbox
+ * can be sent twice as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
*/
struct rsrc_attach {
struct mbox_msghdr hdr;
@@ -281,6 +301,7 @@ struct rsrc_attach {
u16 ssow;
u16 timlfs;
u16 cptlfs;
+ int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
};
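Under the same assumptions as the pseudo-code above (alloc_attach_msg()/send_mbox() are hypothetical stand-ins for the driver's mbox plumbing), the two-step CPT attach looks like:

/* Illustrative only; the mbox helpers are hypothetical. */
struct rsrc_attach *attach;

attach = alloc_attach_msg(mbox);	/* 1st msg: 3 LFs from CPT0 */
attach->cptlfs = 3;
send_mbox(mbox);

attach = alloc_attach_msg(mbox);	/* 2nd msg: add 2 LFs from CPT1 */
attach->modify = 1;
attach->cpt_blkaddr = BLKADDR_CPT1;
attach->cptlfs = 2;
send_mbox(mbox);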
/* Structure for relinquishing resources.
@@ -314,6 +335,8 @@ struct msix_offset_rsp {
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u8 cpt1_lfs;
+ u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
@@ -459,6 +482,20 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
};
/* For NIX LF context alloc and init */
@@ -491,6 +528,16 @@ struct nix_lf_alloc_rsp {
u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
u16 cints; /* NIX_AF_CONST2::CINTS */
u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 sdp_links; /* No. of SDP links present in HW */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
};
/* NIX AQ enqueue msg */
@@ -583,14 +630,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
- /* tx vlan0 tag(C-VLAN) */
- u64 vlan0;
- /* tx vlan1 tag(S-VLAN) */
- u64 vlan1;
- /* insert tx vlan tag */
- u8 insert_vlan :1;
- /* insert tx double vlan tag */
- u8 double_vlan :1;
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+ * both cfg_vtag0 & cfg_vtag1 are '0's;
+ * these fields are used along with free_vtag0
+ * & free_vtag1 to free the nix lf's tx_vlan
+ * configuration.
+ *
+ * They denote the indices of the tx_vtag def
+ * registers that need to be cleared and freed.
+ */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
} tx;
/* valid when cfg_type is '1' */
@@ -605,6 +678,17 @@ struct nix_vtag_config {
};
};
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+ /* Indices of the tx_vtag def registers used to configure
+ * tx vtag0 & vtag1 headers; these indices are valid
+ * when the nix_vtag_config mbox requested vtag0 and/or
+ * vtag1 configuration.
+ */
+};
+
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@@ -865,6 +949,87 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u8 intf;
+ u8 set_cntr; /* if a counter is available, set it for this entry? */
+ u8 default_rule;
+ u8 append; /* overwrite (0) or append (1) flow to the default rule? */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter else counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 start; /* disable a range of entries */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
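Taken together, the structures above let a PF/VF ask the AF to install an MCAM match rule over the mailbox. A minimal sketch of filling npc_install_flow_req to steer TCP port 80 traffic to a receive queue; alloc_flow_req()/send_mbox() and rx_chan are hypothetical stand-ins, while the field names come from the struct above:

/* Illustrative only; mbox helpers and rx_chan are hypothetical. */
struct npc_install_flow_req *req;

req = alloc_flow_req(mbox);
if (!req)
	return -ENOMEM;

req->packet.dport = htons(80);		/* match TCP dport 80 ... */
req->mask.dport = cpu_to_be16(0xffff);	/* ... exactly */
req->features = BIT_ULL(NPC_DPORT_TCP);
req->intf = NIX_INTF_RX;
req->channel = rx_chan;			/* ingress channel of this LF */
req->op = NIX_RX_ACTIONOP_UCAST;
req->index = 2;				/* deliver to RQ 2 */

return send_mbox(mbox);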
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 91a9d00e4fb5..a1f79445db71 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -140,6 +140,63 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};
+/* An NPC port kind defines how incoming or outgoing packets
+ * are processed. NPC accepts packets from up to 64 pkinds.
+ * Software assigns a pkind to each incoming port, such as CGX
+ * Ethernet interfaces, LBK interfaces, etc.
+ */
+enum npc_pkind_type {
+ NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+};
+
+/* List of known and supported fields in the packet header and
+ * fields present in the key structure.
+ */
+enum key_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_SPORT_SCTP,
+ NPC_DPORT_SCTP,
+ NPC_HEADER_FIELDS_MAX,
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* Ethertype for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* Ethertype for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* Ethertype for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+ /* other header fields programmed for extraction but not of interest */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
+};
+
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
@@ -300,11 +357,63 @@ struct nix_rx_action {
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+/* NPC_PARSE_KEX_S nibble definitions for each field */
+#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
+#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
+#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
+#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
+#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
+#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
+#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
+#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
+#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
+#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
+#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
+#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
+#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
+#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
+#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
+#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
+#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
+#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
+#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
+#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
+
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
/* NIX Receive Vtag Action Structure */
-#define VTAG0_VALID_BIT BIT_ULL(15)
-#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
-#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
-#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
struct npc_mcam_kex {
/* MKEX Profile Header */
@@ -357,4 +466,24 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_iip4;
};
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+ bool vfvlan_cfg;
+};
+
#endif /* NPC_H */
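The RX/TX vtag action masks above all pack into a single 64-bit action word. A small illustration of composing an RX vtag action with FIELD_PREP(); the type/lid/relptr values a real caller passes are hardware-profile specific, so they are left as parameters here:

#include <linux/bitfield.h>

/* Illustrative composition of a 64-bit RX vtag action word. */
static u64 rx_vtag0_action(u8 type, u8 lid, u8 relptr)
{
	return RX_VTAG0_VALID_BIT |
	       FIELD_PREP(RX_VTAG0_TYPE_MASK, type) |
	       FIELD_PREP(RX_VTAG0_LID_MASK, lid) |
	       FIELD_PREP(RX_VTAG0_RELPTR_MASK, relptr);
}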
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 77bb4ed32600..b192692b4fc4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -148,6 +148,20 @@
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
+/* Rx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+/* Tx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
@@ -13380,14 +13394,15 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
},
};
-static const struct npc_mcam_kex npc_mkex_default = {
+static struct npc_mcam_kex npc_mkex_default = {
.mkex_sign = MKEX_SIGN,
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
- /* nibble: LA..LE (ltype only) + Channel */
- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
- [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
+ /* nibble: LA..LE (ltype only) + channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
+ /* nibble: LA..LE (ltype only) */
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
.intf_lid_lt_ld = {
/* Default RX MCAM KEX profile */
@@ -13405,12 +13420,14 @@ static const struct npc_mcam_kex npc_mkex_default = {
/* Layer B: Single VLAN (CTAG) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
+ KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
},
[NPC_LT_LB_FDSA] = {
/* SWITCH PORT: 1 byte, KW0[63:48] */
@@ -13436,17 +13453,71 @@ static const struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LD] = {
/* Layer D:UDP */
[NPC_LT_LD_UDP] = {
- /* SPORT: 2 bytes, KW3[15:0] */
- KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
- /* DPORT: 2 bytes, KW3[31:16] */
- KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ /* Layer D:TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ },
+ },
+
+ /* Default TX MCAM KEX profile */
+ [NIX_INTF_TX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: NIX_INST_HDR_S + Ethernet */
+ /* NIX appends 8 bytes of NIX_INST_HDR_S at the
+ * start of each TX packet supplied to NPC.
+ */
+ [NPC_LT_LA_IH_NIX_ETHER] = {
+ /* PF_FUNC: 2B , KW0 [47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* DMAC: 6 bytes, KW1[63:16] */
+ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN[2..3] KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* CTAG VLAN[2..3] KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Outer VLAN: 2 bytes, KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D:UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
/* Layer D:TCP */
[NPC_LT_LD_TCP] = {
- /* SPORT: 2 bytes, KW3[15:0] */
- KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
- /* DPORT: 2 bytes, KW3[31:16] */
- KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
},
},
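Each KEX_LD_CFG() descriptor encodes what to extract from a layer and where it lands in the MCAM key. Decoding the SPORT+DPORT entry used repeatedly above, against the macro definition earlier in this file, makes the encoding concrete:

/* Worked decode of KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), per
 * ((bytesm1 << 16) | (hdr_ofs << 8) | (ena << 7) | (flags_ena << 6) | key_ofs):
 *   bytesm1   = 0x3  -> extract bytesm1 + 1 = 4 bytes
 *   hdr_ofs   = 0x0  -> from byte 0 of the L4 header (SPORT, then DPORT)
 *   ena       = 0x1  -> extraction enabled
 *   flags_ena = 0x0  -> ltype-based entry, not flags-based
 *   key_ofs   = 0x18 -> lands at key byte 0x18, i.e. KW3[31:0]
 */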
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e1f918960730..9f901c0edcbb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -66,6 +66,7 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
+ hw->rvu = rvu;
if (is_rvu_96xx_B0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
@@ -210,6 +211,9 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
* multiple blocks of same type.
*
* @pcifunc has to be zero when no LF is yet attached.
+ *
+ * For a pcifunc, if LFs are attached from multiple blocks of the same type,
+ * return the blkaddr of the first encountered block.
*/
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
@@ -258,20 +262,39 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_pf(pcifunc);
}
- /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
+ * 'BLKADDR_NIX1'.
+ */
if (blktype == BLKTYPE_NIX) {
- reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
+ RVU_PRIV_HWVFX_NIXX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
+ RVU_PRIV_HWVFX_NIXX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX1;
}
- /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
if (blktype == BLKTYPE_CPT) {
- reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
+ RVU_PRIV_HWVFX_CPTX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
+ RVU_PRIV_HWVFX_CPTX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT1;
}
exit:
@@ -306,31 +329,36 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
block->fn_map[lf] = attach ? pcifunc : 0;
- switch (block->type) {
- case BLKTYPE_NPA:
+ switch (block->addr) {
+ case BLKADDR_NPA:
pfvf->npalf = attach ? true : false;
num_lfs = pfvf->npalf;
break;
- case BLKTYPE_NIX:
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
pfvf->nixlf = attach ? true : false;
num_lfs = pfvf->nixlf;
break;
- case BLKTYPE_SSO:
+ case BLKADDR_SSO:
attach ? pfvf->sso++ : pfvf->sso--;
num_lfs = pfvf->sso;
break;
- case BLKTYPE_SSOW:
+ case BLKADDR_SSOW:
attach ? pfvf->ssow++ : pfvf->ssow--;
num_lfs = pfvf->ssow;
break;
- case BLKTYPE_TIM:
+ case BLKADDR_TIM:
attach ? pfvf->timlfs++ : pfvf->timlfs--;
num_lfs = pfvf->timlfs;
break;
- case BLKTYPE_CPT:
+ case BLKADDR_CPT0:
attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
num_lfs = pfvf->cptlfs;
break;
+ case BLKADDR_CPT1:
+ attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
+ num_lfs = pfvf->cpt1_lfs;
+ break;
}
reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
@@ -466,12 +494,16 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
/* Do a HW reset of all RVU blocks */
rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
@@ -695,6 +727,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
u64 *mac;
for (pf = 0; pf < hw->total_pfs; pf++) {
+ /* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
+ if (!pf)
+ goto lbkvf;
+
if (!is_pf_cgxmapped(rvu, pf))
continue;
/* Assign MAC address to PF */
@@ -708,8 +744,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
- /* Assign MAC address to VFs */
+lbkvf:
+ /* Assign MAC address to VFs */
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++, hwvf++) {
pfvf = &rvu->hwvf[hwvf];
@@ -722,6 +760,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -757,6 +796,62 @@ static void rvu_fwdata_exit(struct rvu *rvu)
iounmap(rvu->fwdata);
}
+static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init NIX LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ sprintf(block->name, "NIX%d", blkid);
+ rvu->nix_blkaddr[blkid] = blkaddr;
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init CPT LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ sprintf(block->name, "CPT%d", blkid);
+ return rvu_alloc_bitmap(&block->lf);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -791,27 +886,13 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
return err;
nix:
- /* Init NIX LF's bitmap */
- block = &hw->block[BLKADDR_NIX0];
- if (!block->implemented)
- goto sso;
- cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
- block->lf.max = cfg & 0xFFF;
- block->addr = BLKADDR_NIX0;
- block->type = BLKTYPE_NIX;
- block->lfshift = 8;
- block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
- block->lfcfg_reg = NIX_PRIV_LFX_CFG;
- block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
- block->lfreset_reg = NIX_AF_LF_RST;
- sprintf(block->name, "NIX");
- err = rvu_alloc_bitmap(&block->lf);
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
+ if (err)
+ return err;
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
if (err)
return err;
-sso:
/* Init SSO group's bitmap */
block = &hw->block[BLKADDR_SSO];
if (!block->implemented)
@@ -877,28 +958,13 @@ tim:
return err;
cpt:
- /* Init CPT LF's bitmap */
- block = &hw->block[BLKADDR_CPT0];
- if (!block->implemented)
- goto init;
- cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
- block->lf.max = cfg & 0xFF;
- block->addr = BLKADDR_CPT0;
- block->type = BLKTYPE_CPT;
- block->multislot = true;
- block->lfshift = 3;
- block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
- block->lfcfg_reg = CPT_PRIV_LFX_CFG;
- block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
- block->lfreset_reg = CPT_AF_LF_RST;
- sprintf(block->name, "CPT");
- err = rvu_alloc_bitmap(&block->lf);
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
+ if (err)
+ return err;
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
if (err)
return err;
-init:
/* Allocate memory for PFVF data */
rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
@@ -1025,7 +1091,30 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
/* Get current count of a RVU block's LF/slots
* provisioned to a given RVU func.
*/
-static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
+{
+ switch (blkaddr) {
+ case BLKADDR_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKADDR_SSO:
+ return pfvf->sso;
+ case BLKADDR_SSOW:
+ return pfvf->ssow;
+ case BLKADDR_TIM:
+ return pfvf->timlfs;
+ case BLKADDR_CPT0:
+ return pfvf->cptlfs;
+ case BLKADDR_CPT1:
+ return pfvf->cpt1_lfs;
+ }
+ return 0;
+}
+
+/* Return true if LFs of block type are attached to pcifunc */
+static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
switch (blktype) {
case BLKTYPE_NPA:
@@ -1033,15 +1122,16 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
case BLKTYPE_NIX:
return pfvf->nixlf ? 1 : 0;
case BLKTYPE_SSO:
- return pfvf->sso;
+ return !!pfvf->sso;
case BLKTYPE_SSOW:
- return pfvf->ssow;
+ return !!pfvf->ssow;
case BLKTYPE_TIM:
- return pfvf->timlfs;
+ return !!pfvf->timlfs;
case BLKTYPE_CPT:
- return pfvf->cptlfs;
+ return pfvf->cptlfs || pfvf->cpt1_lfs;
}
- return 0;
+
+ return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
@@ -1054,7 +1144,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Check if this PFFUNC has a LF of type blktype attached */
- if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ if (!is_blktype_attached(pfvf, blktype))
return false;
return true;
@@ -1093,9 +1183,12 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blktype == BLKTYPE_NIX)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
- num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
if (!num_lfs)
return;
@@ -1146,6 +1239,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
continue;
+ else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
+ continue;
else if ((blkid == BLKADDR_SSO) && !detach->sso)
continue;
else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
@@ -1154,6 +1249,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
continue;
+ else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
+ continue;
}
rvu_detach_block(rvu, pcifunc, block->type);
}
@@ -1169,8 +1266,73 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc,
- int blktype, int num_lfs)
+static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr = BLKADDR_NIX0, vf;
+ struct rvu_pfvf *pf;
+
+ /* All CGX mapped PFs are set with assigned NIX block during init */
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ blkaddr = pf->nix_blkaddr;
+ } else if (is_afvf(pcifunc)) {
+ vf = pcifunc - 1;
+ /* Assign NIX based on VF number. All even-numbered VFs get
+ * NIX0 and odd-numbered VFs get NIX1.
+ */
+ blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+ /* NIX1 is not present on all silicons */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ switch (blkaddr) {
+ case BLKADDR_NIX1:
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(1);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(1);
+ break;
+ case BLKADDR_NIX0:
+ default:
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(0);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(0);
+ break;
+ }
+
+ return pfvf->nix_blkaddr;
+}
+
+static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
+ u16 pcifunc, struct rsrc_attach *attach)
+{
+ int blkaddr;
+
+ switch (blktype) {
+ case BLKTYPE_NIX:
+ blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
+ break;
+ case BLKTYPE_CPT:
+ if (attach->hdr.ver < RVU_MULTI_BLK_VER)
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
+ BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+ break;
+ default:
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ }
+
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+
+ return -ENODEV;
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1182,7 +1344,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
if (!num_lfs)
return;
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
return;
@@ -1211,12 +1373,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
struct rsrc_attach *req, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, blkaddr;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int free_lfs, mappedlfs;
/* Only one NPA LF can be attached */
- if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
block = &hw->block[BLKADDR_NPA];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
@@ -1229,8 +1391,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
/* Only one NIX LF can be attached */
- if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
- block = &hw->block[BLKADDR_NIX0];
+ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
goto fail;
@@ -1250,7 +1416,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
/* Check if additional resources are available */
if (req->sso > mappedlfs &&
@@ -1266,7 +1432,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->ssow > mappedlfs &&
((req->ssow - mappedlfs) > free_lfs))
@@ -1281,7 +1447,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->timlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->timlfs > mappedlfs &&
((req->timlfs - mappedlfs) > free_lfs))
@@ -1289,14 +1455,18 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
if (req->cptlfs) {
- block = &hw->block[BLKADDR_CPT0];
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
if (req->cptlfs > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
pcifunc, req->cptlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->cptlfs > mappedlfs &&
((req->cptlfs - mappedlfs) > free_lfs))
@@ -1310,6 +1480,22 @@ fail:
return -ENOSPC;
}
+static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
+ struct rsrc_attach *attach)
+{
+ int blkaddr, num_lfs;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
+ attach->hdr.pcifunc, attach);
+ if (blkaddr < 0)
+ return false;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
+ blkaddr);
+ /* Does the requester already have LFs from the given block? */
+ return !!num_lfs;
+}
+
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
@@ -1330,10 +1516,10 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
/* Now attach the requested resources */
if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
if (attach->nixlf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1343,25 +1529,30 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
}
if (attach->cptlfs) {
- if (attach->modify)
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
}
exit:
@@ -1439,7 +1630,7 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int lf, slot;
+ int lf, slot, blkaddr;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->msix.bmap)
@@ -1449,8 +1640,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
- lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
- rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+ /* Get BLKADDR from which LFs are attached to pcifunc */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ rsp->nix_msixoff = MSIX_VECTOR_INVALID;
+ } else {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
+ }
rsp->sso = pfvf->sso;
for (slot = 0; slot < rsp->sso; slot++) {
@@ -1479,6 +1676,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
rsp->cptlf_msixoff[slot] =
rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
}
+
+ rsp->cpt1_lfs = pfvf->cpt1_lfs;
+ for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
+ rsp->cpt1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
+ }
+
return 0;
}
@@ -1932,7 +2137,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
- block->type);
+ block->addr);
if (!num_lfs)
return;
for (slot = 0; slot < num_lfs; slot++) {
@@ -1941,7 +2146,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
continue;
/* Cleanup LF and reset it */
- if (block->addr == BLKADDR_NIX0)
+ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
else if (block->addr == BLKADDR_NPA)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
@@ -1963,7 +2168,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
@@ -2445,7 +2652,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-static int lbk_get_num_chans(void)
+int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
void __iomem *base;
@@ -2480,7 +2687,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
- chans = lbk_get_num_chans();
+ chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 90eed3160915..37774bac32b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -15,6 +15,7 @@
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
+#include "npc.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
@@ -28,6 +29,7 @@
#define PCI_MBOX_BAR_NUM 4
#define NAME_SIZE 32
+#define MAX_NIX_BLKS 2
/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT 10
@@ -104,6 +106,36 @@ struct nix_mce_list {
int max;
};
+/* layer metadata to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
+ * generated key. A key field may be present anywhere and can
+ * be of any size in the generated key. Once this structure
+ * is populated for the fields of interest, each field's presence
+ * and location (if present) can be known.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of words in the key a field spans. If a field is
+ * 16 bytes long and the key offset is 4, then the field will
+ * use 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2, and
+ * nr_kws will be 3 (KW0, KW1 and KW2).
+ */
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
+
struct npc_mcam {
struct rsrc_bmap counters;
struct mutex lock; /* MCAM entries and counters update lock */
@@ -115,6 +147,7 @@ struct npc_mcam {
u16 *entry2cntr_map;
u16 *cntr2pfvf_map;
u16 *cntr_refcnt;
+ u16 *entry2target_pffunc;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
@@ -127,6 +160,12 @@ struct npc_mcam {
u16 hprio_count;
u16 hprio_end;
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
};
/* Structure for per RVU func info ie PF/VF */
@@ -137,6 +176,7 @@ struct rvu_pfvf {
u16 ssow;
u16 cptlfs;
u16 timlfs;
+ u16 cpt1_lfs;
u8 cgx_lmac;
/* Block LF's MSIX vector info */
@@ -169,19 +209,22 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
- /* VLAN offload */
- struct mcam_entry entry;
- int rxvlan_index;
- bool rxvlan;
+ struct rvu_npc_mcam_rule *def_ucast_rule;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+
+ u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
+ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
};
struct nix_txsch {
@@ -218,12 +261,22 @@ struct nix_lso {
u8 in_use;
};
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
struct nix_hw {
+ int blkaddr;
+ struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
+ struct nix_txvlan txvlan;
};
/* RVU block's capabilities or functionality,
@@ -251,10 +304,16 @@ struct rvu_hwinfo {
u8 lbk_links;
u8 sdp_links;
u8 npc_kpus; /* No of parser units */
+ u8 npc_pkinds; /* No of port kinds */
+ u8 npc_intfs; /* No of interfaces */
+ u8 npc_kpu_entries; /* No of KPU entries */
+ u16 npc_counters; /* No of match stats counters */
+ bool npc_ext_set; /* Extended register set */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
- struct nix_hw *nix0;
+ struct nix_hw *nix;
+ struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
};
@@ -300,7 +359,7 @@ struct npc_kpu_profile_adapter {
const struct npc_lt_def_cfg *lt_def;
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
- const struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex *mkex;
size_t pkinds;
size_t kpus;
};
@@ -315,6 +374,7 @@ struct rvu {
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
int vfs; /* Number of VFs attached to RVU */
+ int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
struct mbox_wq_info afpf_wq_info;
@@ -420,6 +480,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
@@ -429,6 +490,7 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+int rvu_get_num_lbk_chans(void);
/* RVU HW reg validation */
enum regmap_block {
@@ -485,6 +547,9 @@ int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -501,8 +566,8 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -513,6 +578,24 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+bool is_npc_intf_tx(u8 intf);
+bool is_npc_intf_rx(u8 intf);
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *entry_index);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, struct mcam_entry *entry,
+ u8 *intf, u8 *ena);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index fa9152ff5e2a..d298b9357177 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -74,6 +74,20 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
return rvu->cgx_idmap[cgx_id];
}
+/* Based on P2X connectivity find mapped NIX block for a PF */
+static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ int cgx_id, int lmac_id)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[pf];
+ u8 p2x;
+
+ p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ if (p2x == CMR_P2X_SEL_NIX1)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+}
+
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
@@ -117,6 +131,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 77adad4adb1b..39e1a614aaf8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -224,18 +224,53 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
-static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
+ domain = 2;
+ seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "CGX%d", cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
struct rvu_block *block;
struct rvu_hwinfo *hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
- if (blkaddr < 0) {
- dev_warn(rvu->dev, "Invalid blktype\n");
- return false;
- }
hw = rvu->hw;
block = &hw->block[blkaddr];
@@ -291,10 +326,12 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
{
void (*print_qsize)(struct seq_file *filp,
struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
+ int blkaddr;
rvu = filp->private;
switch (blktype) {
@@ -312,7 +349,15 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -329,6 +374,8 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
struct seq_file *seqfile = filp->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -355,7 +402,15 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
goto qsize_write_done;
}
@@ -498,7 +553,7 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -556,7 +611,7 @@ static int write_npa_ctx(struct rvu *rvu, bool all,
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -704,9 +759,17 @@ static void ndc_cache_stats(struct seq_file *s, int blk_addr,
int ctype, int transaction)
{
u64 req, out_req, lat, cant_alloc;
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int port;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
for (port = 0; port < NDC_MAX_PORT; port++) {
req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
(port, ctype, transaction));
@@ -749,9 +812,17 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int bank, max_bank;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
max_bank = NDC_MAX_BANK(rvu, blk_addr);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
@@ -767,16 +838,30 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_RX,
- BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_TX,
- BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
@@ -792,8 +877,14 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
@@ -801,8 +892,14 @@ RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
@@ -969,7 +1066,8 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
{
void (*print_nix_ctx)(struct seq_file *filp,
struct nix_aq_enq_rsp *rsp) = NULL;
- struct rvu *rvu = filp->private;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
char *ctype_string = NULL;
@@ -1001,7 +1099,7 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1053,13 +1151,15 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
}
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
- int id, int ctype, char *ctype_string)
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
{
+ struct nix_hw *nix_hw = m->private;
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1119,7 +1219,8 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
int ctype)
{
struct seq_file *m = filp->private_data;
- struct rvu *rvu = m->private;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
char *cmd_buf, *ctype_string;
int nixlf, id = 0, ret;
bool all = false;
@@ -1155,7 +1256,7 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
goto done;
} else {
ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
- ctype_string);
+ ctype_string, m);
}
done:
kfree(cmd_buf);
@@ -1259,49 +1360,67 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
-static void rvu_dbg_nix_init(struct rvu *rvu)
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
const struct device *dev = &rvu->pdev->dev;
+ struct nix_hw *nix_hw;
struct dentry *pfile;
- rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.nix) {
- dev_err(rvu->dev, "create debugfs dir failed for nix\n");
+ if (!is_block_implemented(rvu->hw, blkaddr))
return;
+
+ if (blkaddr == BLKADDR_NIX0) {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev, "create debugfs dir failed for nix\n");
+ return;
+ }
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev,
+ "create debugfs dir failed for nix1\n");
+ return;
+ }
+ nix_hw = &rvu->hw->nix[1];
}
- pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_sq_ctx_fops);
if (!pfile)
goto create_failed;
- pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_rq_ctx_fops);
if (!pfile)
goto create_failed;
- pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
+ pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_cq_ctx_fops);
if (!pfile)
goto create_failed;
- pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_tx_cache_fops);
+ pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix,
+ nix_hw, &rvu_dbg_nix_ndc_tx_cache_fops);
if (!pfile)
goto create_failed;
- pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_rx_cache_fops);
+ pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix,
+ nix_hw, &rvu_dbg_nix_ndc_rx_cache_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
if (!pfile)
goto create_failed;
@@ -1312,7 +1431,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu)
return;
create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
+ dev_err(dev,
+ "Failed to create debugfs dir/file for NIX blk\n");
debugfs_remove_recursive(rvu->rvu_dbg.nix);
}
@@ -1565,7 +1685,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
- u16 pcifunc;
+ u16 pcifunc, counters;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1573,6 +1693,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return -ENODEV;
mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
@@ -1595,10 +1716,9 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
/* MCAM counters */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- cfg = (cfg >> 48) & 0xFFFF;
- seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
- seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
@@ -1650,6 +1770,198 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (rule->intf == NIX_INTF_TX) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (iter->intf == NIX_INTF_RX) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+ seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
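For reference, the mcam_rules dump produced by the functions above looks roughly like this (values and field-name spellings illustrative; npc_get_field_name() supplies the actual labels):

	Installed by: PF1
	direction: RX
	interface: NIX0_RX
	mcam entry: 229
	DMAC 02:00:00:00:00:01 mask ff:ff:ff:ff:ff:ff
	Forward to: PF1 VF0
	action: Direct to queue 0
	enabled: yes
	counter: 5
	hits: 42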
+
static void rvu_dbg_npc_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
@@ -1664,6 +1976,11 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
if (!pfile)
goto create_failed;
+ pfile = debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_mcam_rules_fops);
+ if (!pfile)
+ goto create_failed;
+
pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
rvu, &rvu_dbg_npc_rx_miss_act_fops);
if (!pfile)
@@ -1691,8 +2008,15 @@ void rvu_dbg_init(struct rvu *rvu)
if (!pfile)
goto create_failed;
+ pfile = debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ if (!pfile)
+ goto create_failed;
+
rvu_dbg_npa_init(rvu);
- rvu_dbg_nix_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 21a89dd76d3c..e8d039503097 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -17,6 +17,7 @@
#include "npc.h"
#include "cgx.h"
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
@@ -68,6 +69,23 @@ struct mce {
u16 pcifunc;
};
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
+{
+ int i = 0;
+
+ /* If blkaddr is 0, return the first NIX block address */
+ if (blkaddr == 0)
+ return rvu->nix_blkaddr[blkaddr];
+
+ while (i + 1 < MAX_NIX_BLKS) {
+ if (rvu->nix_blkaddr[i] == blkaddr)
+ return rvu->nix_blkaddr[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
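rvu_get_next_nix_blkaddr() is meant to be used as an iterator; a minimal usage sketch of the idiom (the same pattern appears below in rvu_get_nixlf_count() and rvu_nix_init()):

	int blkaddr = 0;

	/* walk every implemented NIX block, starting from the first */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		/* operate on rvu->hw->block[blkaddr] here */
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}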
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -81,14 +99,16 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
int rvu_get_nixlf_count(struct rvu *rvu)
{
+ int blkaddr = 0, max = 0;
struct rvu_block *block;
- int blkaddr;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
- block = &rvu->hw->block[blkaddr];
- return block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+ return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
@@ -130,11 +150,18 @@ static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
return idx;
}
-static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
- if (blkaddr == BLKADDR_NIX0 && hw->nix0)
- return hw->nix0;
+ int nix_blkaddr = 0, i = 0;
+ struct rvu *rvu = hw->rvu;
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ while (nix_blkaddr) {
+ if (blkaddr == nix_blkaddr && hw->nix)
+ return &hw->nix[i];
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ i++;
+ }
return NULL;
}
@@ -187,8 +214,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int pkind, pf, vf, lbkid;
u8 cgx_id, lmac_id;
- int pkind, pf, vf;
int err;
pf = rvu_get_pf(pcifunc);
@@ -221,13 +248,24 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ /* If the NIX1 block is present on the silicon, NIX blocks are
+ * assigned alternately to LBK interfaces: NIX0 sends packets
+ * on LBK link 1 channels and NIX1 sends on LBK link 0 channels,
+ * enabling communication between NIX0 and NIX1.
+ */
+ lbkid = 0;
+ if (rvu->hw->lbk_links > 1)
+ lbkid = vf & 0x1 ? 0 : 1;
+
/* Note that AF's VFs work in pairs and talk over consecutive
* loopback channels. Therefore, if an odd number of AF VFs is
* enabled, the last VF remains without a pair.
*/
- pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
- pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
- NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
+ pfvf->tx_chan_base = vf & 0x1 ?
+ NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
+ NIX_CHAN_LBK_CHX(lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
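A worked example of the LBK pairing above (illustrative, assuming two LBK links): VF0 (even) gets lbkid 1 with rx = LBK1 ch0 and tx = LBK1 ch1, while VF1 (odd) gets lbkid 0 with rx = LBK0 ch1 and tx = LBK0 ch0, so each even/odd pair talks over consecutive channels and a trailing odd VF is left unpaired.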
@@ -265,7 +303,6 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->maxlen = 0;
pfvf->minlen = 0;
- pfvf->rxvlan = false;
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
@@ -612,8 +649,9 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp)
+static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -626,10 +664,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
bool ena;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
+ blkaddr = nix_hw->blkaddr;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
@@ -669,8 +704,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
case NIX_AQ_CTYPE_MCE:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+
/* Check if index exceeds MCE list length */
- if (!hw->nix0->mcast.mce_ctx ||
+ if (!nix_hw->mcast.mce_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
@@ -832,6 +868,23 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+}
+
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
@@ -1129,6 +1182,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+ /* Configure pkind for TX parse config */
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
@@ -1137,6 +1194,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | VTAG_STRIP);
+
goto exit;
free_mem:
@@ -1164,10 +1226,14 @@ exit:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->cgx_links = hw->cgx_links;
+ rsp->lbk_links = hw->lbk_links;
+ rsp->sdp_links = hw->sdp_links;
+
return rc;
}
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1186,6 +1252,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
nix_interface_deinit(rvu, pcifunc, nixlf);
/* Reset this NIX LF */
@@ -1914,9 +1989,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
{
u64 regval = req->vtag_size;
- if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
+ /* RX VTAG Type 7 reserved for vf vlan */
+ if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
if (req->rx.capture_vtag)
regval |= BIT_ULL(5);
if (req->rx.strip_vtag)
@@ -1927,9 +2007,149 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
return 0;
}
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u64 regval;
+ int index;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
+
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ int err = 0;
+
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
- struct msg_rsp *rsp)
+ struct nix_vtag_config_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
@@ -1939,19 +2159,28 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
return err;
if (req->cfg_type) {
+ /* rx vtag configuration */
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
if (err)
return NIX_AF_ERR_PARAM;
} else {
- /* TODO: handle tx vtag configuration */
- return 0;
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
}
return 0;
}
-static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
- u16 pcifunc, int next, bool eol)
+static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
+ int mce, u8 op, u16 pcifunc, int next, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -1971,7 +2200,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
/* All fields valid */
*(u64 *)(&aq_req.mce_mask) = ~0ULL;
- err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
@@ -2077,9 +2306,9 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
- mce->pcifunc, next_idx,
- (next_idx > last_idx) ? true : false);
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
idx++;
@@ -2108,6 +2337,11 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
numvfs = (cfg >> 12) & 0xFF;
pfvf = &rvu->pf[pf];
+
+ /* Is this NIX0/1 block mapped to the PF? */
+ if (pfvf->nix_blkaddr != nix_hw->blkaddr)
+ continue;
+
/* Save the start MCE */
pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
@@ -2122,9 +2356,10 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
* Will be updated when a NIXLF is attached/detached to
* these PF/VFs.
*/
- err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
- NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
if (err)
return err;
}
@@ -2176,6 +2411,31 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
return nix_setup_bcast_tables(rvu, nix_hw);
}
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+ /* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+ /* Allocate memory for the entry to RVU PFFUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
+}
+
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
@@ -2680,6 +2940,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
+ bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -2690,13 +2951,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* VF can't overwrite admin(PF) changes */
+ if (from_vf && pfvf->pf_set_vf_cfg)
+ return -EPERM;
+
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
-
return 0;
}
@@ -2743,9 +3006,6 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, allmulti);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
-
return 0;
}
@@ -2882,65 +3142,6 @@ linkcfg:
return 0;
}
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
-{
- struct npc_mcam_alloc_entry_req alloc_req = { };
- struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
- struct npc_mcam_free_entry_req free_req = { };
- u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
- struct rvu_pfvf *pfvf;
-
- /* LBK VFs do not have separate MCAM UCAST entry hence
- * skip allocating rxvlan for them
- */
- if (is_afvf(pcifunc))
- return 0;
-
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- if (pfvf->rxvlan)
- return 0;
-
- /* alloc new mcam entry */
- alloc_req.hdr.pcifunc = pcifunc;
- alloc_req.count = 1;
-
- err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
- &alloc_rsp);
- if (err)
- return err;
-
- /* update entry to enable rxvlan offload */
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- pfvf->rxvlan_index = alloc_rsp.entry_list[0];
- /* all it means is that rxvlan_index is valid */
- pfvf->rxvlan = true;
-
- err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
- if (err)
- goto free_entry;
-
- return 0;
-free_entry:
- free_req.hdr.pcifunc = pcifunc;
- free_req.entry = alloc_rsp.entry_list[0];
- rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
- pfvf->rxvlan = false;
- return err;
-}
-
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp)
{
@@ -3109,17 +3310,15 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
return 0;
}
-int rvu_nix_init(struct rvu *rvu)
+static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = nix_hw->blkaddr;
struct rvu_block *block;
- int blkaddr, err;
+ int err;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
block = &hw->block[blkaddr];
if (is_rvu_96xx_B0(rvu)) {
@@ -3153,7 +3352,7 @@ int rvu_nix_init(struct rvu *rvu)
hw->cgx = (cfg >> 12) & 0xF;
hw->lmac_per_cgx = (cfg >> 8) & 0xF;
hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
- hw->lbk_links = 1;
+ hw->lbk_links = (cfg >> 24) & 0xF;
hw->sdp_links = 1;
/* Initialize admin queue */
@@ -3164,26 +3363,25 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
- if (blkaddr == BLKADDR_NIX0) {
- hw->nix0 = devm_kzalloc(rvu->dev,
- sizeof(struct nix_hw), GFP_KERNEL);
- if (!hw->nix0)
- return -ENOMEM;
+ if (is_block_implemented(hw, blkaddr)) {
+ err = nix_setup_txschq(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
- err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+ err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+ err = nix_setup_mcast(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ err = nix_setup_txvlan(rvu, nix_hw);
if (err)
return err;
/* Configure segmentation offload formats */
- nix_setup_lso(rvu, hw->nix0, blkaddr);
+ nix_setup_lso(rvu, nix_hw, blkaddr);
/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
* This helps HW protocol checker to identify headers
@@ -3236,23 +3434,45 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
}
-void rvu_nix_freemem(struct rvu *rvu)
+int rvu_nix_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- struct rvu_block *block;
+ struct nix_hw *nix_hw;
+ int blkaddr = 0, err;
+ int i = 0;
+
+ hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
+ GFP_KERNEL);
+ if (!hw->nix)
+ return -ENOMEM;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ nix_hw = &hw->nix[i];
+ nix_hw->rvu = rvu;
+ nix_hw->blkaddr = blkaddr;
+ err = rvu_nix_block_init(rvu, nix_hw);
+ if (err)
+ return err;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ i++;
+ }
+
+ return 0;
+}
+
+static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
+ struct rvu_block *block)
+{
struct nix_txsch *txsch;
struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
struct nix_hw *nix_hw;
- int blkaddr, lvl;
+ int lvl;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return;
-
- block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
- if (blkaddr == BLKADDR_NIX0) {
+ if (is_block_implemented(rvu->hw, blkaddr)) {
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return;
@@ -3262,6 +3482,11 @@ void rvu_nix_freemem(struct rvu *rvu)
kfree(txsch->schq.bmap);
}
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+ devm_kfree(rvu->dev, vlan->entry2pfvf_map);
+
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
@@ -3269,6 +3494,20 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &hw->block[blkaddr];
+ rvu_nix_block_freemem(rvu, blkaddr, block);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -3281,6 +3520,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_enable_flows(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3296,6 +3537,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_disable_flows(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
@@ -3308,6 +3551,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
ctx_req.hdr.pcifunc = pcifunc;
/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
nix_interface_deinit(rvu, pcifunc, nixlf);
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
@@ -3431,3 +3676,12 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 511b01dd03ed..5cf9b7a907ae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -28,6 +28,8 @@
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
#define NPC_HW_TSTAMP_OFFSET 8
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
static const char def_pfl_name[] = "default";
@@ -36,6 +38,81 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
u16 pcifunc);
+bool is_npc_intf_tx(u8 intf)
+{
+ return !!(intf & 0x1);
+}
+
+bool is_npc_intf_rx(u8 intf)
+{
+ return !(intf & 0x1);
+}
+
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ return intf < hw->npc_intfs;
+}
+
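With this convention an interface ID's LSB encodes its direction; a small sketch (the 0..3 encoding is an assumption, consistent with rvu_dbg_get_intf_name() in rvu_debugfs.c above):

	/* assumed encoding: 0 = NIX0_RX, 1 = NIX0_TX,
	 *                   2 = NIX1_RX, 3 = NIX1_TX
	 */
	is_npc_intf_rx(2);	/* true - NIX1_RX has LSB 0 */
	is_npc_intf_tx(3);	/* true - NIX1_TX has LSB 1 */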
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data, u8 intf,
+ u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (is_npc_intf_rx(intf))
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
+ pf_func = be16_to_cpu((__force __be16)pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ int base = 0, end;
+
+ if (is_npc_intf_tx(intf))
+ return 0;
+
+ if (is_afvf(pcifunc)) {
+ end = rvu_get_num_lbk_chans();
+ if (end < 0)
+ return -EINVAL;
+ } else {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0);
+ /* CGX mapped functions have a maximum of 16 channels */
+ end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF);
+ }
+
+ if (channel < base || channel > end)
+ return -EINVAL;
+
+ return 0;
+}
+
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -94,6 +171,31 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
return 0;
}
+static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
+ int nixlf)
+{
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* Given a PF/VF and NIX LF number, calculate the unicast mcam
+ * entry index based on the NIX block assigned to the PF/VF.
+ */
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ if (pfvf->nix_blkaddr == blkaddr)
+ break;
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
+}
+
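A quick numeric example of the index computation (numbers hypothetical): if NIX0 exposes 64 LFs and the given PF/VF sits on NIX1 with nixlf = 3, the loop stops after accumulating max = 64, and the reserved entry is found at mcam->nixlf_offset + (64 + 3) * RSVD_MCAM_ENTRIES_PER_NIXLF.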
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
@@ -114,10 +216,10 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
return index + 1;
}
- return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+ return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}
-static int npc_get_bank(struct npc_mcam *mcam, int index)
+int npc_get_bank(struct npc_mcam *mcam, int index)
{
int bank = index / mcam->banksize;
@@ -139,8 +241,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
return (cfg & 1);
}
-static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index, bool enable)
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
@@ -257,6 +359,93 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx,
*cam0 = ~*cam1 & kw_mask;
}
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
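npc_fill_entryword() is the inverse of the existing npc_get_keyword(); a short sketch of why the round trip recovers the original key and mask (reasoning only, not patch code):

	/* Writing:  cam1 = kw & kw_mask;  cam0 = ~cam1 & kw_mask;
	 * Reading:  kw'      = cam1        = kw & kw_mask
	 *           kw_mask' = cam1 ^ cam0 = kw_mask
	 * Bits inside the mask differ between cam0 and cam1, bits
	 * outside are zero in both, so the XOR rebuilds the mask.
	 */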
+
+static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index,
+ struct mcam_entry *entry)
+{
+ u16 owner, target_func;
+ struct rvu_pfvf *pfvf;
+ int bank, nixlf;
+ u64 rx_action;
+
+ owner = mcam->entry2pfvf_map[index];
+ target_func = (entry->action >> 4) & 0xffff;
+ /* return if target is a PF or LBK VF, or rule owner is not a PF */
+ if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+ !(target_func & RVU_PFVF_FUNC_MASK))
+ return;
+
+ pfvf = rvu_get_pfvf(rvu, target_func);
+ mcam->entry2target_pffunc[index] = target_func;
+ /* return if nixlf is not attached or initialized */
+ if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule)
+ return;
+
+ /* get VF ucast entry rule */
+ nix_get_nixlf(rvu, target_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, target_func,
+ nixlf, NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ rx_action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ if (rx_action)
+ entry->action = rx_action;
+}
+
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u8 intf,
struct mcam_entry *entry, bool enable)
@@ -304,6 +493,11 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
+ /* copy VF default entry action to the VF mcam entry */
+ if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
+ npc_get_default_entry_action(rvu, mcam, blkaddr, actindex,
+ entry);
+
/* Set 'action' */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
@@ -317,6 +511,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, u16 dest)
{
@@ -371,11 +601,11 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct nix_rx_action action;
- int blkaddr, index, kwi;
- u64 mac = 0;
+ int blkaddr, index;
/* AF's VFs work in promiscuous mode */
if (is_afvf(pcifunc))
@@ -385,20 +615,9 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
-
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- /* Match ingress channel and DMAC */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
- entry.kw[kwi] = mac;
- entry.kw_mask[kwi] = BIT_ULL(48) - 1;
-
/* Don't change the action if entry is already enabled
* Otherwise RSS action may get overwritten.
*/
@@ -411,25 +630,26 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
-
- /* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
- entry.vtag_action = VTAG0_VALID_BIT |
- FIELD_PREP(VTAG0_TYPE_MASK, 0) |
- FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
- FIELD_PREP(VTAG0_RELPTR_MASK, 12);
-
- memcpy(&pfvf->entry, &entry, sizeof(entry));
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, ucast_idx, index, kwi;
struct mcam_entry entry = { {0} };
@@ -473,7 +693,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ pfvf->nix_rx_intf, &entry, true);
}
static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -531,6 +751,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
@@ -553,14 +774,13 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
} else {
- pfvf = rvu_get_pfvf(rvu, pcifunc);
action.index = pfvf->bcast_mce_idx;
action.op = NIX_RX_ACTIONOP_MCAST;
}
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ pfvf->nix_rx_intf, &entry, true);
}
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
@@ -579,12 +799,47 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
+static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc, u64 rx_action)
+{
+ int actindex, index, bank;
+ bool enable;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc) {
+ bank = npc_get_bank(mcam, index);
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* read vf flow entry enable status */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
+ actindex);
+ /* disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
+ false);
+ /* update 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+ rx_action);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ actindex, true);
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_rx_action action;
int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -621,6 +876,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+ /* update the VF flow rule action with the VF default entry action */
+ if (mcam_index < 0)
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
+
+ /* update the action change in default rule */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_ucast_rule)
+ pfvf->def_ucast_rule->rx_action = action;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
@@ -635,8 +900,6 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
@@ -688,8 +951,6 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
else
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
@@ -704,7 +965,41 @@ void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == pcifunc) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule)
+ pfvf->def_ucast_rule = NULL;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ npc_mcam_disable_flows(rvu, pcifunc);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -713,12 +1008,20 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
mutex_lock(&mcam->lock);
- /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ /* Free all MCAM entries owned by this 'pcifunc' */
npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
- /* Free all MCAM counters mapped to this 'pcifunc' */
+ /* Free all MCAM counters owned by this 'pcifunc' */
npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+ /* Delete MCAM entries owned by this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
mutex_unlock(&mcam->lock);
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
@@ -732,44 +1035,78 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- const struct npc_mcam_kex *mkex)
+static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
{
int lid, lt, ld, fl;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- mkex->keyx_cfg[NIX_INTF_RX]);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- mkex->keyx_cfg[NIX_INTF_TX]);
+ if (is_npc_intf_tx(intf))
+ return;
- for (ld = 0; ld < NPC_MAX_LD; ld++)
- rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
- mkex->kex_ld_flags[ld]);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+ /* Program LDATA */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
- for (ld = 0; ld < NPC_MAX_LD; ld++) {
- SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
mkex->intf_lid_lt_ld[NIX_INTF_RX]
[lid][lt][ld]);
-
- SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
- mkex->intf_lid_lt_ld[NIX_INTF_TX]
- [lid][lt][ld]);
- }
}
}
-
+ /* Program LFLAGS */
for (ld = 0; ld < NPC_MAX_LD; ld++) {
- for (fl = 0; fl < NPC_MAX_LFL; fl++) {
- SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_RX]
[ld][fl]);
+ }
+}
- SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_TX]);
+
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_TX]
[ld][fl]);
- }
+ }
+}
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+ int ld;
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
+ npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
}
}
@@ -909,7 +1246,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
kpu, profile->cam_entries, profile->action_entries);
}
- max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+ max_entries = rvu->hw->npc_kpu_entries;
/* Program CAM match entries for previous KPU extracted data */
num_entries = min_t(int, profile->cam_entries, max_entries);
@@ -964,9 +1301,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
int num_pkinds, num_kpus, idx;
struct npc_pkind *pkind;
- /* Get HW limits */
- hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
-
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
rvu_write64(rvu, blkaddr,
@@ -1005,12 +1339,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int rsvd, err;
u64 cfg;
- /* Get HW limits */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- mcam->banks = (cfg >> 44) & 0xF;
- mcam->banksize = (cfg >> 28) & 0xFFFF;
- mcam->counters.max = (cfg >> 48) & 0xFFFF;
-
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
@@ -1077,12 +1405,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
- /* Reserve last counter for MCAM RX miss action which is set to
- * drop pkt. This way we will know how many pkts didn't match
- * any MCAM entry.
- */
- mcam->counters.max--;
- mcam->rx_miss_act_cntr = mcam->counters.max;
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
@@ -1109,6 +1431,12 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->cntr_refcnt)
goto free_mem;
+ /* Alloc memory for saving the target device of each mcam rule */
+ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2target_pffunc)
+ goto free_mem;
+
mutex_init(&mcam->lock);
return 0;
@@ -1118,12 +1446,110 @@ free_mem:
return -ENOMEM;
}
+static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 npc_const, npc_const1;
+ u64 npc_const2 = 0;
+
+ npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
+ if (npc_const1 & BIT_ULL(63))
+ npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
+
+ pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+ hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
+ hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
+ hw->npc_intfs = npc_const & 0xFULL;
+ hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
+
+ mcam->banks = (npc_const >> 44) & 0xFULL;
+ mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ /* Extended set */
+ if (npc_const2) {
+ hw->npc_ext_set = true;
+ hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
+ mcam->banksize = npc_const2 & 0xFFFFULL;
+ }
+
+ mcam->counters.max = hw->npc_counters;
+}
+
+static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nibble_ena, rx_kex, tx_kex;
+ u8 intf;
+
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop packet. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
+ rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
+ tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+
+ /* Configure RX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_tx(intf))
+ continue;
+
+ /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ rx_kex);
+
+ /* If MCAM lookup doesn't result in a match, drop the received
+ * packet, and map this action to a counter to count dropped
+ * packets.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
+
+ /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_STAT_ACT(intf),
+ ((mcam->rx_miss_act_cntr >> 9) << 12) |
+ BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ }
+
+ /* Configure TX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_rx(intf))
+ continue;
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ tx_kex);
+
+ /* Set TX miss action to UCAST_DEFAULT, i.e.
+ * transmit the packet on NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ }
+}
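
The MISS_STAT_ACT write above splits the counter index across two register fields, with bit 9 doubling as the stat-enable bit. A standalone sketch of the packing described by the register comment (illustrative only; it masks the low bits explicitly for clarity):

#include <stdint.h>
#include <stdio.h>

/* Pack an MCAM stat counter into the *_MISS_STAT_ACT / *_STAT_ACT
 * layout noted above: reg[14:12] = counter[11:9],
 * reg[9] = stat enable, reg[8:0] = counter[8:0].
 */
static uint64_t stat_act_pack(uint16_t cntr)
{
	return ((uint64_t)(cntr >> 9) << 12) |	/* counter[11:9] */
	       (1ULL << 9) |			/* enable stats */
	       (cntr & 0x1ff);			/* counter[8:0] */
}

int main(void)
{
	printf("cntr 0x2a5 -> reg %#llx\n",
	       (unsigned long long)stat_act_pack(0x2a5));
	return 0;
}
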
+
int rvu_npc_init(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1132,17 +1558,15 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ rvu_npc_hw_init(rvu, blkaddr);
+
/* First disable all MCAM entries, to stop traffic towards NIXLFs */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
- for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ for (bank = 0; bank < mcam->banks; bank++) {
+ for (entry = 0; entry < mcam->banksize; entry++)
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
}
- /* Allocate resource bimap for pkind*/
- pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
- NPC_AF_CONST1) >> 12) & 0xFF;
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
@@ -1180,42 +1604,21 @@ int rvu_npc_init(struct rvu *rvu)
BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
BIT_ULL(2) | BIT_ULL(1));
- /* Set RX and TX side MCAM search key size.
- * LA..LD (ltype only) + Channel
- */
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
- nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
- /* Due to an errata (35786) in A0 pass silicon, parse nibble enable
- * configuration has to be identical for both Rx and Tx interfaces.
- */
- if (is_rvu_96xx_B0(rvu)) {
- tx_kex &= ~NPC_PARSE_NIBBLE;
- tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- }
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
-
- err = npc_mcam_rsrcs_init(rvu, blkaddr);
- if (err)
- return err;
+ rvu_npc_setup_interfaces(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
- /* Set TX miss action to UCAST_DEFAULT i.e
- * transmit the packet on NIX LF SQ's default channel.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
- NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
- /* If MCAM lookup doesn't result in a match, drop the received packet.
- * And map this action to a counter to count dropped pkts.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
- NIX_RX_ACTIONOP_DROP);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
- BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
return 0;
}
@@ -1307,10 +1710,13 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
/* Set mapping and increment counter's refcnt */
mcam->entry2cntr_map[entry] = cntr;
mcam->cntr_refcnt[cntr]++;
- /* Enable stats */
+ /* Enable stats
+ * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0]
+ */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
- BIT_ULL(9) | cntr);
+ ((cntr >> 9) << 12) | BIT_ULL(9) | cntr);
}
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
@@ -1380,6 +1786,7 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
npc_unmap_mcam_entry_and_cntr(rvu, mcam,
blkaddr, index,
cntr);
+ mcam->entry2target_pffunc[index] = 0x0;
}
}
}
@@ -1766,6 +2173,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
goto exit;
mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -1785,18 +2193,49 @@ exit:
return rc;
}
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct npc_mcam_write_entry_req *req,
struct msg_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
+ u16 channel, chan_mask;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
+ chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ channel &= chan_mask;
+
mutex_lock(&mcam->lock);
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
if (rc)
@@ -1808,12 +2247,28 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
goto exit;
}
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ if (!is_npc_interface_valid(rvu, req->intf)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->set_cntr)
@@ -2141,6 +2596,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam_alloc_and_write_entry_req *req,
struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam_alloc_counter_req cntr_req;
struct npc_mcam_alloc_counter_rsp cntr_rsp;
struct npc_mcam_alloc_entry_req entry_req;
@@ -2148,13 +2604,26 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 entry = NPC_MCAM_ENTRY_INVALID;
u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ u16 channel, chan_mask;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_MCAM_INVALID_REQ;
+
+ chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ channel &= chan_mask;
+
+ if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Try to allocate a MCAM entry */
@@ -2196,7 +2665,13 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
write_entry:
mutex_lock(&mcam->lock);
- npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->alloc_cntr)
@@ -2255,26 +2730,72 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *index)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
bool enable;
+ u8 nix_intf;
+
+ if (is_npc_intf_tx(intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ *index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ /* don't force-enable the unicast entry */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf,
+ entry, enable);
+
+ return enable;
+}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return NPC_MCAM_INVALID_REQ;
- if (!pfvf->rxvlan)
- return 0;
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+ /* Find the pkt steering rule installed by the PF for this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
NIXLF_UCAST_ENTRY);
- pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
- enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
- npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
- NIX_INTF_RX, &pfvf->entry, enable);
-
- return 0;
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000000..4ddfdff33a61
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_SPORT_SCTP] = "sctp source port",
+ [NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
+ */
+static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
+ << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 3;
+ }
+}
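
The mask arithmetic above is easiest to see with a concrete field that straddles a keyword boundary. A minimal sketch covering the one- and two-keyword cases (the three-keyword case is omitted; names are invented, and (1ULL << n) - 1 stands in for the driver's GENMASK_ULL):

#include <stdint.h>
#include <stdio.h>

/* Build per-keyword masks for a field of nr_bits starting at bit
 * 'offset' of 64-bit keyword 'kwi', following the split logic above.
 */
static void kw_masks(uint64_t *masks, int kwi, int offset, int nr_bits)
{
	if (offset + nr_bits <= 64) {
		masks[kwi] |= (nr_bits == 64 ? ~0ULL :
			       (1ULL << nr_bits) - 1) << offset;
	} else {
		int lo = 64 - offset;		/* bits in first word */

		masks[kwi] |= ((1ULL << lo) - 1) << offset;
		masks[kwi + 1] |= (1ULL << (nr_bits - lo)) - 1;
	}
}

int main(void)
{
	uint64_t masks[3] = { 0 };

	/* a 48-bit MAC starting at bit 32 of kw0 spills into kw1 */
	kw_masks(masks, 0, 32, 48);
	printf("kw0 %#llx kw1 %#llx\n",
	       (unsigned long long)masks[0],
	       (unsigned long long)masks[1]);
	return 0;
}
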
+
+/* Helper function to figure out whether field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ int ret;
+
+ ret = memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata));
+ return ret == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+ /* Fields with the same layer id and different ltypes are mutually
+ * exclusive, hence they are allowed to overlap
+ */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether a given field overlaps with any other field
+ * in the key. Due to limitations on key size and the key extraction profile in
+ * use, higher layers can overwrite a lower layer's header fields. Hence overlap
+ * needs to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf)) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+ /* check whether any of the input field's bits
+ * fall within any other field's bits.
+ */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only as of now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
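
Each enabled parse-result nibble is simply appended to the key, so the Nth one occupies bits [4N+3:4N] of the concatenated key. A tiny sketch of the kwi/offset mapping used above:

#include <stdio.h>

/* Illustrative only: split a nibble's bit position into a keyword
 * index and an in-word offset, as npc_scan_parse_result() does.
 */
int main(void)
{
	for (int key_nibble = 0; key_nibble <= 16; key_nibble += 4) {
		int kwi = (key_nibble * 4) / 64;
		int offset = (key_nibble * 4) % 64;

		printf("nibble %2d -> kw[%d] bits [%d:%d]\n",
		       key_nibble, kwi, offset + 3, offset);
	}
	return 0;
}
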
+
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (is_npc_intf_tx(intf)) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+ /* Handle header fields which can come from multiple layers like
+ * etype, outer vlan tci. These fields should have the same position in
+ * the key; otherwise installing an mcam rule would need more than one
+ * entry, which complicates mcam space management.
+ */
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+
+ /* if the programmed key profile does not extract Ethertype at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ goto vlan_tci;
+
+ /* if the programmed key profile extracts Ethertype from one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if the programmed key profile extracts Ethertype from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check none of higher layers overwrite Ethertype */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ goto vlan_tci;
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ goto done;
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ goto done;
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check none of higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ goto done;
+ *features |= BIT_ULL(NPC_OUTER_VID);
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ start_kwi = key / 8;
+ offset = (key * 8) % 64;
+
+ /* For Tx, Layer A has NIX_INST_HDR_S (64 bits, i.e. 8 bytes,
+ * matching la_start below) preceding the ethernet header.
+ */
+ if (is_npc_intf_tx(intf)) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ npc_set_kw_masks(mcam, (name), (hlen) * 8, \
+ start_kwi, offset + bit_offset, intf);\
+ } \
+ } \
+} while (0)
+
+ /* List LID, LTYPE, start offset from layer and length (in bytes) of
+ * packet header fields below.
+ * Example: Source IP is 4 bytes and starts at 12th byte of IP header
+ */
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
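
NPC_SCAN_HDR locates a sub-field inside the bytes an extractor copies out of a header. Since the extracted bytes keep their on-wire order, a field ending nearer the end of the chunk lands at a lower bit offset, which is what `(hdr + nr_bytes - hstart - hlen) * 8` computes. A worked sketch (the example values are illustrative, not taken from a real profile):

#include <stdio.h>

/* For an extractor that copies nr_bytes of a header starting at
 * byte 'hdr', compute the bit offset of a sub-field
 * [hstart, hstart + hlen) within the extracted data, mirroring
 * the NPC_SCAN_HDR arithmetic above.
 */
static int field_bit_offset(int hdr, int nr_bytes, int hstart, int hlen)
{
	return (hdr + nr_bytes - hstart - hlen) * 8;
}

int main(void)
{
	/* say the extractor pulls 8 bytes of the IP header from byte 12;
	 * the IPv4 source address is bytes 12..15 (hstart 12, hlen 4),
	 * so it sits 32 bits above the destination address that follows
	 */
	printf("SIP bit offset: %d\n", field_bit_offset(12, 8, 12, 4));
	return 0;
}
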
+
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp_sctp;
+ int err, hdr;
+
+ if (is_npc_intf_tx(intf))
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ err = npc_check_field(rvu, blkaddr, hdr, intf);
+ if (!err)
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+
+ /* for tcp/udp/sctp the corresponding layer type should be in the key */
+ if (*features & tcp_udp_sctp)
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp_sctp;
+
+ /* for vlan the corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID))
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+}
+
+/* Scan the key extraction profile and record how the fields of interest
+ * fill the key structure. Also verify that Channel and DMAC exist in the
+ * key and are not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u8 key_nibble = 0;
+ u64 cfg;
+
+ /* Scan and note how the parse result is placed in the key.
+ * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble of the
+ * parse result in the key; the enabled nibbles are concatenated
+ * into the key.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ cfg &= NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is placed in the key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+ /* DMAC should be present in key for unicast filter to work */
+ if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite DMAC */
+ if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (is_npc_intf_tx(intf))
+ mcam_features = &mcam->tx_features;
+
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_info(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
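
The XOR/AND-NOT expression above reduces to `requested & ~supported`, i.e. exactly the requested feature bits the key cannot match. A tiny standalone check of that identity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t supported = 0x0f;	/* features the key extracts */
	uint64_t requested = 0x13;	/* features the rule asks for */

	/* (supported ^ requested) & ~supported == requested & ~supported */
	uint64_t unsupported = (supported ^ requested) & ~supported;

	printf("unsupported %#llx\n", (unsigned long long)unsupported);
	return 0;
}
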
+
+/* npc_update_entry - Based on the masks generated during
+ * key scanning, updates the given entry with the value and
+ * mask for the field of interest. At most 16 bytes of a packet
+ * header can be extracted by HW, hence lo and hi are sufficient.
+ * When the field is 8 bytes or less, hi should be
+ * 0 for both value and mask.
+ *
+ * If an exact match of the value is required then the mask should
+ * be all 1's. If any bits in the mask are 0 then the corresponding
+ * bits in the value are don't care.
+ */
+static void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? val_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? mask_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+ /* dummy is now ready with values and masks for the given key
+ * field; clear the input entry and update it with those
+ */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ entry->kw[i] &= ~field->kw_mask[i];
+ entry->kw_mask[i] &= ~field->kw_mask[i];
+
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
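
Worth noting in the code above is the `shift ? val_lo >> (64 - shift) : 0` guard: shifting a 64-bit value by 64 is undefined behaviour in C, so the carry into the next keyword must special-case shift == 0. A minimal sketch of the two-keyword placement (illustrative only, invented names):

#include <stdint.h>
#include <stdio.h>

/* Place a (lo, hi) value into two 64-bit keywords starting at bit
 * 'shift', guarding the shift-by-64 case the same way the driver
 * does (x >> 64 is undefined).
 */
static void place_val(uint64_t val_lo, uint64_t val_hi, unsigned int shift,
		      uint64_t *kw0, uint64_t *kw1)
{
	*kw0 = val_lo << shift;
	*kw1 = (shift ? val_lo >> (64 - shift) : 0) | (val_hi << shift);
}

int main(void)
{
	uint64_t kw0, kw1;

	place_val(0x1122334455667788ULL, 0x99ULL, 16, &kw0, &kw1);
	printf("kw0 %#llx kw1 %#llx\n",
	       (unsigned long long)kw0, (unsigned long long)kw1);
	return 0;
}
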
+
+#define IPV6_WORDS 4
+
+static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
+ u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+ u64 mask_lo, mask_hi;
+ u64 val_lo, val_hi;
+
+ /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
+ * values to be programmed in MCAM should be as below:
+ * val_high: 0xfe80000000000000
+ * val_low: 0x2c6863fffe5e2d0a
+ */
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+
+ mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
+ mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
+ val_hi = (u64)src_ip[0] << 32 | src_ip[1];
+ val_lo = (u64)src_ip[2] << 32 | src_ip[3];
+
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
+ memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
+ }
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+
+ mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
+ mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
+ val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
+ val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];
+
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
+ memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
+ }
+}
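
The hi/lo folding for IPv6 follows directly from the comment above: after byte-order conversion, the first two CPU-order 32-bit words form val_hi and the last two form val_lo. A standalone sketch reproducing the commented example:

#include <stdint.h>
#include <stdio.h>

/* Fold four CPU-order 32-bit words of an IPv6 address into the
 * (val_hi, val_lo) pair programmed into the MCAM, matching the
 * fe80::2c68:63ff:fe5e:2d0a example in the comment above.
 */
int main(void)
{
	/* address words, already converted to CPU order */
	uint32_t ip[4] = { 0xfe800000, 0x00000000, 0x2c6863ff, 0xfe5e2d0a };

	uint64_t val_hi = (uint64_t)ip[0] << 32 | ip[1];
	uint64_t val_lo = (uint64_t)ip[2] << 32 | ip[3];

	printf("val_hi %#llx val_lo %#llx\n",
	       (unsigned long long)val_hi, (unsigned long long)val_lo);
	return 0;
}
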
+
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+ /* For tcp/udp/sctp LTYPE should be present in entry */
+ if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
+ 0, ~0ULL, 0, intf);
+
+ if (features & BIT_ULL(NPC_OUTER_VID))
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL((field))) { \
+ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \
+ (mask_lo), (mask_hi), intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+
+ npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
+}
+
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
+ u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry) {
+ mutex_unlock(&mcam->lock);
+ return iter;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+ mutex_unlock(&mcam->lock);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* Try to allocate a counter to track the stats of this
+ * rule. If a counter cannot be allocated then proceed
+ * without one, since counters are more limited than entries.
+ */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_rx_action action;
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
+ ~0ULL, 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
+ action = pfvf->def_ucast_rule->rx_action;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
+ 0, ~0ULL, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* On TX, VTAG0 is placed at byte 20 and VTAG1 at byte 24 of
+ * LID_A (see the relative pointers below), not LID_B as on RX.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
+ u64 features, installed_features, missing_features = 0;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ bool new = false, msg_from_vf;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ int entry_index, err;
+
+ msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf);
+
+ if (is_npc_intf_rx(req->intf))
+ npc_update_rx_entry(rvu, pfvf, entry, req, target);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (is_npc_intf_tx(req->intf))
+ goto find_rule;
+
+ if (def_ucast_rule)
+ missing_features = (def_ucast_rule->features ^ features) &
+ def_ucast_rule->features;
+
+ if (req->default_rule && req->append) {
+ /* add to default rule */
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet,
+ &def_ucast_rule->mask,
+ &dummy, req->intf);
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ pfvf->nix_rx_intf, entry,
+ &entry_index);
+ installed_features = req->features | missing_features;
+ } else if (req->default_rule && !req->append) {
+ /* overwrite default rule */
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ pfvf->nix_rx_intf, entry,
+ &entry_index);
+ } else if (msg_from_vf) {
+ /* normal rule - merge the default rule's match fields into it for a VF */
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet, &def_ucast_rule->mask,
+ &dummy, req->intf);
+ installed_features = req->features | missing_features;
+ }
+
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+ /* no counter for default rule */
+ if (req->default_rule)
+ goto update_rule;
+
+ /* allocate new counter if rule has no counter */
+ if (req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if user wants to delete an existing counter for a rule then
+ * free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+ write_req.entry = req->entry;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new)
+ kfree(rule);
+ return err;
+ }
+update_rule:
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (is_npc_intf_tx(req->intf))
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ if (is_npc_intf_tx(req->intf))
+ rule->intf = pfvf->nix_tx_intf;
+ else
+ rule->intf = pfvf->nix_rx_intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_ucast_rule = rule;
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ }
+
+ if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ rule->vfvlan_cfg = true;
+
+ return 0;
+}
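
The append path above leans on a small bitmask identity: (def ^ req) & def selects exactly the fields the default unicast rule matches that the request does not, so only those need to be merged in. A worked example with made-up feature bits:

	/* Illustration only; the real NPC_* bit positions differ */
	#define F_DMAC	BIT_ULL(0)
	#define F_OVID	BIT_ULL(1)
	#define F_SIP	BIT_ULL(2)

	u64 def_features = F_DMAC | F_OVID;	/* default rule: DMAC + VLAN */
	u64 req_features = F_OVID | F_SIP;	/* request: VLAN + SIP */
	u64 missing = (def_features ^ req_features) & def_features;
	/* missing == F_DMAC: only the DMAC match is copied from the
	 * default rule into the new entry
	 */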
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return -EINVAL;
+
+ if (from_vf && req->default_rule)
+ return NPC_MCAM_PERM_DENIED;
+
+ /* Per PF/VF info is maintained in struct rvu_pfvf.
+ * The rvu_pfvf of the target PF/VF has to be retrieved,
+ * hence derive the target pcifunc accordingly.
+ */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+ if (npc_check_unsupported_flows(rvu, req->features, req->intf))
+ return -EOPNOTSUPP;
+
+ if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ /* PF installing for its VF */
+ if (req->hdr.pcifunc && !from_vf && req->vf)
+ pfvf->pf_set_vf_cfg = 1;
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ }
+
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+
+ /* If interface is uninitialized then do not enable entry */
+ if (err || (!req->default_rule && !pfvf->def_ucast_rule))
+ enable = false;
+
+ /* Packets reaching NPC in Tx path implies that a
+ * NIXLF is properly setup and transmitting.
+ * Hence rules can be enabled for Tx.
+ */
+ if (is_npc_intf_tx(req->intf))
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return -EINVAL;
+
+ /* If message is from VF then its flow should not overlap with
+ * reserved unicast flow.
+ */
+ if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
+ pfvf->def_ucast_rule->features & req->features)
+ return -EINVAL;
+
+ return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
+ enable, pf_set_vfs_mac);
+}
+
+static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct msg_rsp dis_rsp;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = rule->entry;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
+
+ INIT_LIST_HEAD(&del_list);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ /* All rules */
+ if (req->all) {
+ list_move_tail(&iter->list, &del_list);
+ /* Range of rules */
+ } else if (req->end && iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ list_for_each_entry_safe(iter, tmp, &del_list, list) {
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[iter->entry] = 0x0;
+ if (npc_delete_flow(rvu, iter, pcifunc))
+ dev_err(rvu->dev, "rule deletion failed for entry:%d",
+ iter->entry);
+ }
+
+ return 0;
+}
+
+static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
+ struct rvu_npc_mcam_rule *rule,
+ struct rvu_pfvf *pfvf)
+{
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct mcam_entry *entry = &write_req.entry_data;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct msg_rsp rsp;
+ u8 intf, enable;
+ int err;
+
+ ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
+
+ npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
+ entry, &intf, &enable);
+
+ npc_update_entry(rvu, NPC_DMAC, entry,
+ ether_addr_to_u64(pfvf->mac_addr), 0,
+ 0xffffffffffffull, 0, intf);
+
+ write_req.hdr.pcifunc = rule->owner;
+ write_req.entry = rule->entry;
+
+ mutex_unlock(&mcam->lock);
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
+ mutex_lock(&mcam->lock);
+
+ return err;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, index;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ def_ucast_rule = pfvf->def_ucast_rule;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->vfvlan_cfg)
+ npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!def_ucast_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = def_ucast_rule->rx_action;
+ def_action = *(u64 *)&def_ucast_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ }
+ }
+
+ /* Enable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, true);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+ /* Disable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, false);
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
index 9d7c135c7965..e266f0c49559 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -35,7 +35,7 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{0x1200, 0x12E0} } },
{NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
{0x1610, 0x1618} } },
- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 7ca599b973c0..1f3379f12b81 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -54,20 +54,20 @@
#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
-#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3)
#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
-#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3)
#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
-#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3)
#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
-#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3)
/* RVU PF registers */
#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
@@ -446,6 +446,8 @@
#define NPC_AF_BLK_RST (0x00040)
#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_CONST2 (0x00100)
+#define NPC_AF_CONST3 (0x00110)
#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
#define NPC_AF_PCK_CFG (0x00600)
#define NPC_AF_PCK_DEF_OL2 (0x00610)
@@ -469,20 +471,7 @@
(0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
(0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
- (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
- (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
- (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
- (0x1880000 | (a) << 8 | (b) << 4)
-#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
-#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
- (0x1900008 | (a) << 8 | (b) << 4)
#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
@@ -499,6 +488,70 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1800000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000038ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000040ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900008ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000048ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000050ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MATCH_STATX(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880008ull | (a) << 8); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000078ull | (a) << 8); \
+ offset; })
+
/* NDC */
#define NDC_AF_CONST (0x00000)
#define NDC_AF_CLK_EN (0x00020)
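
The reworked NPC_AF_MCAMEX_* macros above are GNU C statement expressions, so a single register name resolves to either the legacy or the extended MCAM map at run time; note that they implicitly use the rvu pointer from the calling scope. The pattern reduced to its essentials, with hw_has_ext standing in for rvu->hw->npc_ext_set:

	#define DEMO_REG(a) ({						\
		u64 off = 0x1000000ull | (u64)(a) << 10; /* legacy map */ \
		if (hw_has_ext)						\
			off = 0x8000000ull | (u64)(a) << 8; /* ext map */ \
		off; /* the value of the ({ ... }) expression */	\
	})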
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index a3ecb5de9000..723643868589 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -14,6 +14,8 @@
/* RVU Block revision IDs */
#define RVU_BLK_RVUM_REVID 0x01
+#define RVU_MULTI_BLK_VER 0x7ULL
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
@@ -31,7 +33,9 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NIX0_RX = 0xcULL,
BLKADDR_NDC_NIX0_TX = 0xdULL,
BLKADDR_NDC_NPA0 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_NDC_NIX1_RX = 0x10ULL,
+ BLKADDR_NDC_NIX1_TX = 0x11ULL,
+ BLK_COUNT = 0x12ULL,
};
/* RVU Block Type Enumeration */
@@ -917,4 +921,15 @@ enum nix_vtag_size {
VTAGSIZE_T4 = 0x0,
VTAGSIZE_T8 = 0x1,
};
+
+enum nix_tx_vtag_op {
+ NOP = 0x0,
+ VTAG_INSERT = 0x1,
+ VTAG_REPLACE = 0x2,
+};
+
+/* NIX RX VTAG actions */
+#define VTAG_STRIP BIT_ULL(4)
+#define VTAG_CAPTURE BIT_ULL(5)
+
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b2c6385707c9..4193ae3bde6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o
+ otx2_ptp.o otx2_flows.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index d2581090f9a4..68fb4e4757aa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -191,10 +191,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- else
+ /* update dmac field in vlan offload rule */
+ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ } else {
return -EPERM;
+ }
return 0;
}
@@ -531,8 +535,10 @@ static int otx2_get_link(struct otx2_nic *pfvf)
link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
}
/* LBK channel */
- if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
- link = 12;
+ if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) {
+ map = pfvf->hw.tx_chan_base & 0x7FF;
+ link = pfvf->hw.cgx_links | ((map >> 8) & 0xF);
+ }
return link;
}
@@ -1237,7 +1243,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
sq = &qset->sq[qidx];
sq->sqb_count = 0;
- sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
if (!sq->sqb_ptrs)
return -ENOMEM;
@@ -1503,6 +1509,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+ pfvf->hw.cgx_links = rsp->cgx_links;
+ pfvf->hw.lbk_links = rsp->lbk_links;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index d6253f2a414d..b18b45d02165 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -18,6 +18,7 @@
#include <linux/timecounter.h>
#include <mbox.h>
+#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include <rvu_trace.h>
@@ -197,12 +198,17 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
};
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
bool intf_down; /* interface was either configured or not */
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ int tx_vtag_idx;
};
struct flr_work {
@@ -226,6 +232,32 @@ struct otx2_ptp {
#define OTX2_HW_TIMESTAMP_LEN 8
+struct otx2_mac_table {
+ u8 addr[ETH_ALEN];
+ u16 mcam_entry;
+ bool inuse;
+};
+
+struct otx2_flow_config {
+ u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
+ u32 nr_flows;
+#define OTX2_MAX_NTUPLE_FLOWS 32
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
+ OTX2_MAX_UNICAST_FLOWS + \
+ OTX2_MAX_VLAN_FLOWS)
+ u32 ntuple_offset;
+ u32 unicast_offset;
+ u32 rx_vlan_offset;
+ u32 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
+#define OTX2_VF_VLAN_RX_INDEX 0
+#define OTX2_VF_VLAN_TX_INDEX 1
+ u32 ntuple_max_flows;
+ struct list_head flow_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -236,6 +268,12 @@ struct otx2_nic {
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
+#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
@@ -264,6 +302,7 @@ struct otx2_nic {
struct refill_work *refill_wrk;
struct workqueue_struct *otx2_wq;
struct work_struct rx_mode_work;
+ struct otx2_mac_table *mac_table;
/* Ethtool stuff */
u32 msg_enable;
@@ -273,6 +312,8 @@ struct otx2_nic {
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
+
+ struct otx2_flow_config *flow_cfg;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -642,4 +683,24 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+/* MCAM filter related APIs */
+int otx2_mcam_flow_init(struct otx2_nic *pf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
+void otx2_mcam_flow_del(struct otx2_nic *pf);
+int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
+int otx2_get_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 location);
+int otx2_get_all_flows(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int otx2_add_flow(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp);
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
+int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req);
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 662fb80dbb9d..67171b66a56c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -551,6 +551,16 @@ static int otx2_get_rxnfc(struct net_device *dev,
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
+ break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
@@ -561,6 +571,50 @@ static int otx2_get_rxnfc(struct net_device *dev,
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_add_flow(pfvf, &nfc->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_remove_flow(pfvf, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int otx2vf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -806,8 +860,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
- .get_rxnfc = otx2_get_rxnfc,
- .set_rxnfc = otx2_set_rxnfc,
+ .get_rxnfc = otx2vf_get_rxnfc,
+ .set_rxnfc = otx2vf_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
new file mode 100644
index 000000000000..be8ccfce1848
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <net/ipv6.h>
+
+#include "otx2_common.h"
+
+#define OTX2_DEFAULT_ACTION 0x1
+
+struct otx2_flow {
+ struct ethtool_rx_flow_spec flow_spec;
+ struct list_head list;
+ u32 location;
+ u16 entry;
+ bool is_vf;
+ int vf;
+};
+
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int vf_vlan_max_flows;
+ int i;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ req->contig = false;
+ req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (rsp->count != req->count) {
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ req->count, rsp->count);
+ /* support only ntuples here */
+ flow_cfg->ntuple_max_flows = rsp->count;
+ flow_cfg->ntuple_offset = 0;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ } else {
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
+ vf_vlan_max_flows;
+ flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
+ OTX2_MAX_NTUPLE_FLOWS;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ }
+
+ for (i = 0; i < rsp->count; i++)
+ flow_cfg->entry[i] = rsp->entry_list[i];
+
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
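
When the full allocation succeeds, entry[] is carved up in a fixed order: VF VLAN entries first, then ntuple, unicast, and RX VLAN. For example, with 4 VFs and the OTX2_MAX_* values defined in otx2_common.h:

	/* vf_vlan_offset = 0            8 entries (4 VFs * 2 flows) */
	/* ntuple_offset  = 0 + 8  = 8   32 entries                  */
	/* unicast_offset = 8 + 32 = 40  8 entries                   */
	/* rx_vlan_offset = 40 + 8 = 48  1 entry                     */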
+
+int otx2_mcam_flow_init(struct otx2_nic *pf)
+{
+ int err;
+
+ pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pf->flow_cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+
+ pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
+
+ err = otx2_alloc_mcam_entries(pf);
+ if (err)
+ return err;
+
+ pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
+ * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+ if (!pf->mac_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void otx2_mcam_flow_del(struct otx2_nic *pf)
+{
+ otx2_destroy_mcam_flows(pf);
+}
+
+/* On success adds an mcam entry.
+ * On failure promiscuous mode is enabled.
+ */
+static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err, i;
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return -ENOMEM;
+
+ /* no free mcam entries, or the uc list exceeds the allotted entries */
+ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ return -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* unicast offset starts at 32; indices 0..31 are for ntuple */
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (pf->mac_table[i].inuse)
+ continue;
+ ether_addr_copy(pf->mac_table[i].addr, mac);
+ pf->mac_table[i].inuse = true;
+ pf->mac_table[i].mcam_entry =
+ flow_cfg->entry[i + flow_cfg->unicast_offset];
+ req->entry = pf->mac_table[i].mcam_entry;
+ break;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ return otx2_do_add_macfilter(pf, mac);
+}
+
+static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
+ int *mcam_entry)
+{
+ int i;
+
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (!pf->mac_table[i].inuse)
+ continue;
+
+ if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
+ *mcam_entry = pf->mac_table[i].mcam_entry;
+ pf->mac_table[i].inuse = false;
+ return true;
+ }
+ }
+ return false;
+}
+
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct npc_delete_flow_req *req;
+ int err, mcam_entry;
+
+ /* check whether an mcam entry exists for the given mac */
+ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->entry = mcam_entry;
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ struct list_head *head = &pfvf->flow_cfg->flow_list;
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location > flow->location)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&flow->list, head);
+}
+
+int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 location)
+{
+ struct otx2_flow *iter;
+
+ if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location) {
+ nfc->fs = iter->flow_spec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ u32 location = 0;
+ int idx = 0;
+ int err = 0;
+
+ nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
+ err = otx2_get_flow(pfvf, nfc, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+
+ return err;
+}
+
+static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IP_USER_FLOW:
+ if (ipv4_usr_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_usr_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (ipv4_l4_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_l4_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv4_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IPV6_USER_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ if (ipv6_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv6_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+ u32 flow_type;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ /* bits not set in mask are don't care */
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ ether_addr_copy(pkt->smac, eth_hdr->h_source);
+ ether_addr_copy(pmask->smac, eth_mask->h_source);
+ req->features |= BIT_ULL(NPC_SMAC);
+ }
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
+ ether_addr_copy(pmask->dmac, eth_mask->h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ if (eth_mask->h_proto) {
+ memcpy(&pkt->etype, &eth_hdr->h_proto,
+ sizeof(pkt->etype));
+ memcpy(&pmask->etype, &eth_mask->h_proto,
+ sizeof(pmask->etype));
+ req->features |= BIT_ULL(NPC_ETYPE);
+ }
+ break;
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ break;
+ case IPV6_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (fsp->flow_type & FLOW_EXT) {
+ if (fsp->m_ext.vlan_etype)
+ return -EINVAL;
+ if (fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ return -EINVAL;
+ if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
+ sizeof(pkt->vlan_tci));
+ memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
+ sizeof(pmask->vlan_tci));
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+
+ /* Not Drop/Direct to queue but use action in default entry */
+ if (fsp->m_ext.data[1] &&
+ fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+
+ if (fsp->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fsp->m_ext.h_dest)) {
+ ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
+ ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+
+ if (!req->features)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
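
Note that the FLOW_EXT path accepts only an exact VLAN ID match: the mask must be the full VLAN_VID_MASK. For instance, to match VID 5 an ethtool_rx_flow_spec would carry:

	/* exact match on VID 5; any other vlan_tci mask is rejected */
	fsp->h_ext.vlan_tci = htons(5);
	fsp->m_ext.vlan_tci = htons(VLAN_VID_MASK);	/* 0x0fff */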
+
+static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ u64 ring_cookie = flow->flow_spec.ring_cookie;
+ struct npc_install_flow_req *req;
+ int err, vf = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_prepare_flow_request(&flow->flow_spec, req);
+ if (err) {
+ /* free the allocated msg above */
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ req->entry = flow->entry;
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ req->channel = pfvf->hw.rx_chan_base;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ req->op = NIX_RX_ACTIONOP_DROP;
+ } else {
+ /* change to unicast only if the user did not request
+ * the default entry's action
+ */
+ if (req->op != NIX_RX_ACTION_DEFAULT)
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ if (vf > pci_num_vf(pfvf->pdev)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+ }
+
+ /* ethtool encodes the VF as (VF + 1) in ring_cookie; 0 means the PF */
+ if (vf) {
+ req->vf = vf;
+ flow->is_vf = true;
+ flow->vf = vf;
+ }
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
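
The ring_cookie decoded above uses the standard ethtool encoding: the ring index lives in the low 32 bits and the VF (plus one, zero meaning the PF itself) in bits 39:32. A short sketch using the uapi helpers:

	#include <linux/ethtool.h>

	static void demo_decode_cookie(u64 ring_cookie)
	{
		u64 ring = ethtool_get_flow_spec_ring(ring_cookie);	/* bits 31:0 */
		u64 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);	/* bits 39:32 */
		/* vf == 0 targets the PF; vf == n targets VF n - 1 */
	}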
+
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct otx2_flow *flow;
+ bool new = false;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+ if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, fsp->location);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
+ return -ENOMEM;
+ flow->location = fsp->location;
+ flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
+ flow->location];
+ new = true;
+ }
+ /* struct copy */
+ flow->flow_spec = *fsp;
+
+ err = otx2_add_flow_msg(pfvf, flow);
+ if (err) {
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* add the newly installed flow to the list */
+ if (new) {
+ otx2_add_flow_to_list(pfvf, flow);
+ flow_cfg->nr_flows++;
+ }
+
+ return 0;
+}
+
+static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+ if (all)
+ req->all = 1;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_flow *flow;
+ int err;
+
+ if (location >= flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, location);
+ if (!flow)
+ return -ENOENT;
+
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (err)
+ return err;
+
+ list_del(&flow->list);
+ kfree(flow);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
+int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
+ req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
+ flow_cfg->ntuple_max_flows - 1];
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+ return err;
+}
+
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ /* remove all flows */
+ err = otx2_remove_flow_msg(pfvf, 0, true);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->all = 1;
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->intf = NIX_INTF_RX;
+ ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->channel = pfvf->hw.rx_chan_base;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
+{
+ struct nix_vtag_config *req;
+ struct mbox_msghdr *rsp_hdr;
+ int err;
+
+ /* Don't have enough mcam entries */
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
+ return -ENOMEM;
+
+ if (enable) {
+ err = otx2_install_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ } else {
+ err = otx2_delete_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* config strip, capture and size */
+ req->vtag_size = VTAGSIZE_T4;
+ req->cfg_type = 1; /* rx vlan cfg */
+ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req->rx.strip_vtag = enable;
+ req->rx.capture_vtag = enable;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
+ rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp_hdr)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp_hdr);
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return rsp_hdr->rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 66f1a212f1f4..4c82f60f3cf3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1278,6 +1278,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1359,8 +1360,9 @@ err_free_rq_ptrs:
otx2_aura_pool_free(pf);
err_free_nix_lf:
mutex_lock(&mbox->lock);
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1379,6 +1381,7 @@ exit:
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
struct msg_req *req;
@@ -1419,8 +1422,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_lock(&mbox->lock);
/* Reset NIX LF */
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
+ if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
+ free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1562,6 +1568,9 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_enable_rxvlan(pf, true);
+
/* When reinitializing enable time stamping if it is enabled before */
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
@@ -1716,10 +1725,20 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
+ bool promisc = false;
if (!(netdev->flags & IFF_UP))
return;
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast addresses to mcam entries, or delete them from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
@@ -1729,8 +1748,7 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
req->mode = NIX_RX_MODE_UCAST;
- /* We don't support MAC address filtering yet */
- if (netdev->flags & IFF_PROMISC)
+ if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
@@ -1743,11 +1761,20 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
features & NETIF_F_LOOPBACK);
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
+ return otx2_enable_rxvlan(pf,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pf);
+
return 0;
}
@@ -1903,6 +1930,245 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
}
}
+static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
+{
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->default_rule = 1;
+ req->append = 1;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+ int ret;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ether_addr_copy(config->mac, mac);
+
+ ret = otx2_do_set_vf_mac(pf, vf, mac);
+ if (ret == 0)
+ dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
+
+ return ret;
+}
+
+static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct nix_vtag_config_rsp *vtag_rsp;
+ struct npc_delete_flow_req *del_req;
+ struct nix_vtag_config *vtag_req;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ int err = 0;
+ u32 idx;
+
+ config = &pf->vf_configs[vf];
+
+ if (!vlan && !config->vlan)
+ goto out;
+
+ mutex_lock(&pf->mbox.lock);
+
+ /* free old tx vtag entry */
+ if (config->vlan) {
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vtag_req->cfg_type = 0;
+ vtag_req->tx.free_vtag0 = 1;
+ vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+ }
+
+ if (!vlan && config->vlan) {
+ /* rx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ del_req->entry =
+ flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ del_req->entry =
+ flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ goto out;
+ }
+
+ /* rx */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->packet.vlan_tci = htons(vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* AF fills in the destination mac addr */
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* configure tx vtag params */
+ vtag_req->vtag_size = VTAGSIZE_T4;
+ vtag_req->cfg_type = 0; /* tx vlan cfg */
+ vtag_req->tx.cfg_vtag0 = 1;
+ vtag_req->tx.vtag0 = (ntohs(proto) << 16) | vlan;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
+ (&pf->mbox.mbox, 0, &vtag_req->hdr);
+ if (IS_ERR(vtag_rsp)) {
+ err = PTR_ERR(vtag_rsp);
+ goto out;
+ }
+ config->tx_vtag_idx = vtag_rsp->vtag0_idx;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ eth_zero_addr((u8 *)&req->mask.dmac);
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.tx_chan_base;
+ req->intf = NIX_INTF_TX;
+ req->vf = vf + 1;
+ req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+ req->vtag0_def = vtag_rsp->vtag0_idx;
+ req->vtag0_op = VTAG_INSERT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ config->vlan = vlan;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
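
The TX vtag value programmed above packs the TPID into the upper 16 bits of the word and the VLAN ID into the lower 16. For 802.1Q and VID 100:

	/* proto is big-endian ETH_P_8021Q (0x8100), vlan a host-order VID */
	u32 vtag0 = (ntohs(proto) << 16) | vlan;	/* 0x81000064 for VID 100 */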
+
+static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ /* qos is currently unsupported */
+ if (vlan >= VLAN_N_VID || qos)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
+ return -EOPNOTSUPP;
+
+ return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
+}
+
+static int otx2_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, config->mac);
+ ivi->vlan = config->vlan;
+
+ return 0;
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -1914,6 +2180,9 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_do_ioctl = otx2_ioctl,
+ .ndo_set_vf_mac = otx2_set_vf_mac,
+ .ndo_set_vf_vlan = otx2_set_vf_vlan,
+ .ndo_get_vf_config = otx2_get_vf_config,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -2110,6 +2379,25 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+ err = otx2_mcam_flow_init(pf);
+ if (err)
+ goto err_ptp_destroy;
+
+ if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ netdev->features |= netdev->hw_features;
+
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -2122,7 +2410,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_del_mcam_entries;
}
err = otx2_wq_init(pf);
@@ -2142,6 +2430,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_del_mcam_entries:
+ otx2_mcam_flow_del(pf);
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
@@ -2285,6 +2575,8 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
+
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
otx2_config_hw_tx_tstamp(pf, false);
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
@@ -2300,6 +2592,7 @@ static void otx2_remove(struct pci_dev *pdev)
destroy_workqueue(pf->otx2_wq);
otx2_ptp_destroy(pf);
+ otx2_mcam_flow_del(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index d5d7a2f37493..d0e25414f1a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -556,6 +556,19 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->tstmp = 1;
}
+#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
+ if (skb_vlan_tag_present(skb)) {
+ if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ ext->vlan1_ins_ena = 1;
+ ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+ } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ ext->vlan0_ins_ena = 1;
+ ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+ }
+ }
+
*offset += sizeof(*ext);
}
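
A note on the insertion pointer: ETH_HLEN (14) minus ETH_TLEN (2) is byte offset 12, immediately after the destination and source MAC addresses - exactly where an 802.1Q or 802.1ad tag belongs in the frame. A minimal userspace sketch of the arithmetic (constants restated here for illustration, not taken from the kernel headers):

    #include <stdio.h>

    #define ETH_HLEN 14	/* dmac(6) + smac(6) + EtherType(2) */
    #define ETH_TLEN 2	/* size of the EtherType field */

    int main(void)
    {
    	/* The tag is inserted at byte 12, between smac and EtherType */
    	printf("vlan insert offset = %d\n", ETH_HLEN - ETH_TLEN);
    	return 0;
    }
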
@@ -871,6 +884,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+		/* Insert VLAN tag before handing the packet to software TSO */
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 67fabf265fe6..d3e4cfd244e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -558,6 +558,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
+	/* Support TSO on tagged (VLAN) interfaces */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 25981a7a43b5..ebe1406c6e64 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4900,7 +4900,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
};
if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
- strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
+ snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]);
else
snprintf(buf, sz, "(chip %#x)", chipid);
return buf;
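
This strncpy() to snprintf() change closes a classic hole: strncpy() leaves the destination without a NUL terminator whenever the source is at least as long as the buffer, while snprintf() always terminates (for a positive size) and truncates safely. A standalone illustration with made-up strings:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char a[4], b[4];

    	strncpy(a, "Yukon", sizeof(a));        /* 'Y','u','k','o' - no NUL */
    	snprintf(b, sizeof(b), "%s", "Yukon"); /* "Yuk" - NUL-terminated */
    	printf("%s\n", b);                     /* printing a would be UB */
    	return 0;
    }
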
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 502d1b97855c..b0f79a5151cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -684,7 +684,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference(ring->xdp_prog);
xdp.rxq = &ring->xdp_rxq;
xdp.frame_sz = priv->frag_info[0].frag_stride;
- doorbell_pending = 0;
+ doorbell_pending = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
index 582997577a04..954b86faac29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -135,7 +135,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
* @dev: mlx4_dev.
* @port: Physical port number.
* @vport: Vport id.
- * @out_param: Array of mlx4_vport_qos_param which holds the requested values.
+ * @in_param: Array of mlx4_vport_qos_param which holds the requested values.
*
* Returns 0 on success or a negative mlx4_core errno code.
**/
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1187ef1375e2..394f43add85c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -300,7 +300,7 @@ static const char *resource_str(enum mlx4_resource rt)
case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
- };
+ }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 2d477f9a8cb7..83a67ca43a41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -81,7 +81,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o \
+ steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 38e4f19d69f8..43271a3856ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "en/params.h"
+#include "en/txrx.h"
+#include "en_accel/tls_rxtx.h"
static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -152,3 +154,35 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
+
+u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+ u16 stop_room;
+
+ stop_room = mlx5e_tls_get_stop_room(mdev, params);
+ stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ if (is_mpwqe)
+		/* An MPWQE can take up to the maximum-sized WQE plus all the
+		 * normal stop room, since a new packet may break the active
+		 * MPWQE session and allocate its WQEs right away.
+ */
+ stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+ return stop_room;
+}
+
+int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
+{
+ size_t sq_size = 1 << params->log_sq_size;
+ u16 stop_room;
+
+ stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
+ if (stop_room >= sq_size) {
+ netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n",
+ stop_room, sq_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
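
Hoisting this check out of SQ allocation lets the ethtool and MTU paths below reject a bad configuration before any queue is touched. A toy model of the comparison, with invented numbers standing in for the real WQEBB accounting:

    #include <stdio.h>

    /* Reject configurations whose reserved stop room covers the whole SQ */
    static int validate(unsigned int log_sq_size, unsigned int stop_room)
    {
    	unsigned long sq_size = 1UL << log_sq_size;

    	return stop_room >= sq_size ? -1 : 0;
    }

    int main(void)
    {
    	printf("%d\n", validate(6, 33)); /* 64-entry SQ, 33 reserved: 0 */
    	printf("%d\n", validate(5, 33)); /* 32-entry SQ, 33 reserved: -1 */
    	return 0;
    }
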
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index a87273e801b2..187007ad3349 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -30,6 +30,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
bool is_mpw;
+ u16 stop_room;
};
struct mlx5e_channel_param {
@@ -124,4 +125,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_sq_param *param);
+u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params);
+
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ae90d533a350..2e3e78b0f333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -366,7 +366,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- bool recycle)
+ bool recycle,
+ struct xdp_frame_bulk *bq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
@@ -379,7 +380,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* XDP_TX from the XSK RQ and XDP_REDIRECT */
dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
xdpi.frame.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame(xdpi.frame.xdpf);
+ xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
@@ -397,12 +398,15 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
+ struct xdp_frame_bulk bq;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
u32 xsk_frames = 0;
u16 sqcc;
int i;
+ xdp_frame_bulk_init(&bq);
+
sq = container_of(cq, struct mlx5e_xdpsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
@@ -434,7 +438,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -447,6 +451,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
}
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ xdp_flush_frame_bulk(&bq);
+
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
@@ -463,8 +469,13 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
+ struct xdp_frame_bulk bq;
u32 xsk_frames = 0;
+ xdp_frame_bulk_init(&bq);
+
+	rcu_read_lock(); /* needed by xdp_return_frame_bulk() */
+
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
u16 ci;
@@ -474,9 +485,12 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
}
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
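
Both completion paths now follow the generic xdp_frame_bulk pattern: initialize the bulk queue once, feed each completed frame to xdp_return_frame_bulk(), and drain the leftovers with xdp_flush_frame_bulk(). The bulk path looks up the frame's memory allocator under RCU, which is why the teardown path above, running outside NAPI context, wraps it in rcu_read_lock(). Schematically - the completion walk below is a hypothetical stand-in for the driver's descriptor loop, not a real API:

    struct xdp_frame_bulk bq;

    xdp_frame_bulk_init(&bq);
    rcu_read_lock();			/* required outside NAPI/softirq */

    while (have_completed_frame())	/* hypothetical completion walk */
    	xdp_return_frame_bulk(next_completed_xdpf(), &bq);

    xdp_flush_frame_bulk(&bq);		/* return whatever is still queued */
    rcu_read_unlock();
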
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index b140e13fdcc8..d16def68ecff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -13,20 +13,20 @@ struct mlx5e_dump_wqe {
(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
static u8
-mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
+mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
unsigned int sync_len)
{
/* Given the MTU and sync_len, calculates an upper bound for the
* number of DUMP WQEs needed for the TX resync of a record.
*/
- return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
+ return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
{
u16 num_dumps, stop_room = 0;
- num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
+ num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
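
Taking the MTU from params rather than from sq->hw_mtu means the stop room can now be computed before any SQ exists, which is what mlx5e_calc_sq_stop_room() above relies on. As a rough worked example with an illustrative 1500-byte MTU: TLS_MAX_PAYLOAD_SIZE is 16384, so DIV_ROUND_UP(16384, 1500) = 11 DUMP WQEs for the payload, plus up to MAX_SKB_FRAGS more for the fragments.
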
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 7521c9be735b..ee04e916fa21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state {
u32 tls_tisn;
};
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
struct sk_buff *skb, int datalen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 6982b193ee8a..f51c04284e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -385,15 +385,13 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
-
if (!mlx5_accel_is_tls_device(mdev))
return 0;
if (mlx5_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(sq);
+ return mlx5e_ktls_get_stop_room(params);
/* FPGA */
/* Resync SKB. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 5f162ad2ee8f..9923132c9440 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -43,7 +43,7 @@
#include "en.h"
#include "en/txrx.h"
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
+u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
@@ -71,7 +71,7 @@ mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false;
static inline void
mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d25a56ec6876..42e61dc28ead 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en/port.h"
+#include "en/params.h"
#include "en/xsk/pool.h"
#include "lib/clock.h"
@@ -369,6 +370,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
new_channels.params.log_rq_mtu_frames = log_rq_size;
new_channels.params.log_sq_size = log_sq_size;
+ err = mlx5e_validate_params(priv, &new_channels.params);
+ if (err)
+ goto unlock;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
goto unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ebce97921e03..527c5f12c5af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1121,28 +1121,6 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
-static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
-{
- int sq_size = 1 << log_sq_size;
-
- sq->stop_room = mlx5e_tls_get_stop_room(sq);
- sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
- if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
- /* A MPWQE can take up to the maximum-sized WQE + all the normal
- * stop room can be taken if a new packet breaks the active
- * MPWQE session and allocates its WQEs right away.
- */
- sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
- if (WARN_ON(sq->stop_room >= sq_size)) {
- netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
- sq->stop_room, sq_size);
- return -ENOSPC;
- }
-
- return 0;
-}
-
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
@@ -1176,9 +1154,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
- err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
- if (err)
- return err;
+ sq->stop_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2225,6 +2201,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+ param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
mlx5e_build_tx_cq_param(priv, params, &param->cqp);
}
@@ -3999,6 +3976,9 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
+ err = mlx5e_validate_params(priv, &new_channels.params);
+ if (err)
+ goto out;
if (params->xdp_prog &&
!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 8ebfe782f95e..4ea5d6ddf56a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -189,19 +189,21 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
return count_eqe;
}
-static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags)
+static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
+ unsigned long *flags)
__acquires(&eq->lock)
{
- if (in_irq())
+ if (!recovery)
spin_lock(&eq->lock);
else
spin_lock_irqsave(&eq->lock, *flags);
}
-static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags)
+static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
+ unsigned long *flags)
__releases(&eq->lock)
{
- if (in_irq())
+ if (!recovery)
spin_unlock(&eq->lock);
else
spin_unlock_irqrestore(&eq->lock, *flags);
@@ -223,11 +225,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
struct mlx5_eqe *eqe;
unsigned long flags;
int num_eqes = 0;
+ bool recovery;
dev = eq->dev;
eqt = dev->priv.eq_table;
- mlx5_eq_async_int_lock(eq_async, &flags);
+ recovery = action == ASYNC_EQ_RECOVER;
+ mlx5_eq_async_int_lock(eq_async, recovery, &flags);
eqe = next_eqe_sw(eq);
if (!eqe)
@@ -249,9 +253,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
out:
eq_update_ci(eq, 1);
- mlx5_eq_async_int_unlock(eq_async, &flags);
+ mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
- return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0;
+ return unlikely(recovery) ? num_eqes : 0;
}
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
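
Dropping in_irq() in favour of an explicit flag encodes what both callers already know: the notifier runs either from the hard-IRQ handler, where the plain spin_lock is enough, or from the mlx5_cmd_eq_recover() polling path in process context, which must use spin_lock_irqsave() so the interrupt handler cannot deadlock against it. This also matches the kernel-wide push to remove in_irq() from drivers - a function should be told its execution context, not guess it.
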
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
index 656f96be6e20..89ef592656c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -47,11 +47,12 @@
/**
 * enum mlx5_fpga_access_type - Enumerates the different methods possible for
* accessing the device memory address space
+ *
+ * @MLX5_FPGA_ACCESS_TYPE_I2C: Use the slow CX-FPGA I2C bus
+ * @MLX5_FPGA_ACCESS_TYPE_DONTCARE: Use the fastest available method
*/
enum mlx5_fpga_access_type {
- /** Use the slow CX-FPGA I2C bus */
MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
- /** Use the fastest available method */
MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
};
@@ -113,6 +114,7 @@ struct mlx5_fpga_conn_attr {
* subsequent receives.
*/
void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
+	/** @cb_arg: A context to be passed to the recv_cb callback */
void *cb_arg;
};
@@ -145,7 +147,7 @@ void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn);
/**
* mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet
- * @fdev: An FPGA SBU connection
+ * @conn: An FPGA SBU connection
* @buf: The packet buffer
*
* Queues a packet for transmission over an FPGA SBU connection.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
new file mode 100644
index 000000000000..7df11a019df9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 - 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006 - 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include "dr_types.h"
+
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int max_order)
+{
+ int i;
+
+ buddy->max_order = max_order;
+
+ INIT_LIST_HEAD(&buddy->list_node);
+ INIT_LIST_HEAD(&buddy->used_list);
+ INIT_LIST_HEAD(&buddy->hot_list);
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+
+ if (!buddy->bitmap || !buddy->num_free)
+ goto err_free_all;
+
+	/* Allocate max_order + 1 bitmaps, one for each order */
+
+ for (i = 0; i <= buddy->max_order; ++i) {
+ unsigned int size = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(size, GFP_KERNEL);
+ if (!buddy->bitmap[i])
+ goto err_out_free_each_bit_per_order;
+ }
+
+	/* In the beginning, only the biggest order is available for use,
+	 * so mark its first bit as free and set its free count to one.
+ */
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_each_bit_per_order:
+ for (i = 0; i <= buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+err_free_all:
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+ return -ENOMEM;
+}
+
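
The sizing is easiest to see with a small worked example. With an illustrative max_order of 3, order i tracks segments of 2^i entries, so the bitmap at order i needs 1 << (max_order - i) bits, and after init only the single top-order segment is free:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int max_order = 3, i;	/* illustrative, not the HW value */

    	for (i = 0; i <= max_order; i++)
    		printf("order %u: %u segments\n", i, 1u << (max_order - i));
    	/* order 0: 8, order 1: 4, order 2: 2, order 3: 1 */
    	return 0;
    }
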
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ int i;
+
+ list_del(&buddy->list_node);
+
+ for (i = 0; i <= buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int start_order,
+ unsigned int *segment,
+ unsigned int *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+/**
+ * mlx5dr_buddy_alloc_mem() - Allocate a memory segment from the buddy.
+ * @buddy: Buddy to allocate from.
+ * @order: Order (log2, in entries) of the requested segment.
+ * @segment: Returned segment number.
+ *
+ * This function finds the first free area of the ICM memory managed by
+ * this buddy. It uses the buddy system data structures, searching from
+ * the requested order up to the maximum order available in the system.
+ *
+ * Return: 0 when @segment is set, a negative error status otherwise.
+ *
+ * On success, @segment holds the location in the whole buddy ICM memory
+ * area - the index of the memory segment that is now available for use.
+ */
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int order,
+ unsigned int *segment)
+{
+ unsigned int seg, order_iter;
+ int err;
+
+ err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
+ /* If we found free memory in some order that is bigger than the
+ * required order, we need to split every order between the required
+ * order and the order that we found into two parts, and mark accordingly.
+ */
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+ *segment = seg;
+
+ return 0;
+}
+
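
To make the split loop concrete, a trace with illustrative numbers - a request for order 0 against a buddy where only the order-2 segment 0 is free:

    clear bitmap[2] bit 0	(take the whole order-2 block)
    set   bitmap[1] bit 1	(seg ^ 1: its upper half becomes free)
    set   bitmap[0] bit 1	(upper half of the lower half becomes free)
    *segment = 0		(seg << order, an entry-granular index)

Each iteration halves the block, keeps the lower half, and publishes the upper half as free one order down.
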
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int seg, unsigned int order)
+{
+ seg >>= order;
+
+	/* Whenever a segment is freed, try to merge it with its buddy:
+	 * while the buddy segment is also free, absorb it and move the
+	 * merged block up one order.
+	 */
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+ bitmap_set(buddy->bitmap[order], seg, 1);
+
+ ++buddy->num_free[order];
+}
+
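
The free path mirrors the allocation trace above: returning segment 0 at order 0 coalesces all the way back up:

    bitmap[0] bit 1 set -> clear it, merge: seg = 0, order = 1
    bitmap[1] bit 1 set -> clear it, merge: seg = 0, order = 2
    set bitmap[2] bit 0 -> the original order-2 block is whole again

The loop only stops when a buddy bit is found clear, at which point the merged block is marked free at its final order.
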
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 6bd34b293007..ebc879052e42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -93,12 +93,12 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
- if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
}
- if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) {
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
caps->flex_parser_id_icmpv6_dw0 =
MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
caps->flex_parser_id_icmpv6_dw1 =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index cc33515b9aba..66c24767e3b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,50 +4,16 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024)
-
-struct mlx5dr_icm_pool;
-
-struct mlx5dr_icm_bucket {
- struct mlx5dr_icm_pool *pool;
-
- /* Chunks that aren't visible to HW not directly and not in cache */
- struct list_head free_list;
- unsigned int free_list_count;
-
- /* Used chunks, HW may be accessing this memory */
- struct list_head used_list;
- unsigned int used_list_count;
-
- /* HW may be accessing this memory but at some future,
- * undetermined time, it might cease to do so. Before deciding to call
- * sync_ste, this list is moved to sync_list
- */
- struct list_head hot_list;
- unsigned int hot_list_count;
-
- /* Pending sync list, entries from the hot list are moved to this list.
- * sync_ste is executed and then sync_list is concatenated to the free list
- */
- struct list_head sync_list;
- unsigned int sync_list_count;
-
- u32 total_chunks;
- u32 num_of_entries;
- u32 entry_size;
- /* protect the ICM bucket */
- struct mutex mutex;
-};
+#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
struct mlx5dr_icm_pool {
- struct mlx5dr_icm_bucket *buckets;
enum mlx5dr_icm_type icm_type;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
- enum mlx5dr_icm_chunk_size num_of_buckets;
- struct list_head icm_mr_list;
- /* protect the ICM MR list */
- struct mutex mr_mutex;
struct mlx5dr_domain *dmn;
+ /* memory management */
+ struct mutex mutex; /* protect the ICM pool and ICM buddy */
+ struct list_head buddy_mem_list;
+ u64 hot_memory_size;
};
struct mlx5dr_icm_dm {
@@ -58,13 +24,11 @@ struct mlx5dr_icm_dm {
};
struct mlx5dr_icm_mr {
- struct mlx5dr_icm_pool *pool;
struct mlx5_core_mkey mkey;
struct mlx5dr_icm_dm dm;
- size_t used_length;
+ struct mlx5dr_domain *dmn;
size_t length;
u64 icm_start_addr;
- struct list_head mr_list;
};
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
@@ -107,8 +71,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
if (!icm_mr)
return NULL;
- icm_mr->pool = pool;
- INIT_LIST_HEAD(&icm_mr->mr_list);
+ icm_mr->dmn = pool->dmn;
icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool->icm_type);
@@ -150,8 +113,6 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
goto free_mkey;
}
- list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
-
return icm_mr;
free_mkey:
@@ -166,10 +127,9 @@ free_icm_mr:
static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
- struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
+ struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
struct mlx5dr_icm_dm *dm = &icm_mr->dm;
- list_del(&icm_mr->mr_list);
mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
dm->addr, dm->obj_id);
@@ -178,19 +138,17 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
{
- struct mlx5dr_icm_bucket *bucket = chunk->bucket;
-
- chunk->ste_arr = kvzalloc(bucket->num_of_entries *
+ chunk->ste_arr = kvzalloc(chunk->num_of_entries *
sizeof(chunk->ste_arr[0]), GFP_KERNEL);
if (!chunk->ste_arr)
return -ENOMEM;
- chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
+ chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
DR_STE_SIZE_REDUCED, GFP_KERNEL);
if (!chunk->hw_ste_arr)
goto out_free_ste_arr;
- chunk->miss_list = kvmalloc(bucket->num_of_entries *
+ chunk->miss_list = kvmalloc(chunk->num_of_entries *
sizeof(chunk->miss_list[0]), GFP_KERNEL);
if (!chunk->miss_list)
goto out_free_hw_ste_arr;
@@ -204,72 +162,6 @@ out_free_ste_arr:
return -ENOMEM;
}
-static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
-{
- size_t mr_free_size, mr_req_size, mr_row_size;
- struct mlx5dr_icm_pool *pool = bucket->pool;
- struct mlx5dr_icm_mr *icm_mr = NULL;
- struct mlx5dr_icm_chunk *chunk;
- int i, err = 0;
-
- mr_req_size = bucket->num_of_entries * bucket->entry_size;
- mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type);
- mutex_lock(&pool->mr_mutex);
- if (!list_empty(&pool->icm_mr_list)) {
- icm_mr = list_last_entry(&pool->icm_mr_list,
- struct mlx5dr_icm_mr, mr_list);
-
- if (icm_mr)
- mr_free_size = icm_mr->dm.length - icm_mr->used_length;
- }
-
- if (!icm_mr || mr_free_size < mr_row_size) {
- icm_mr = dr_icm_pool_mr_create(pool);
- if (!icm_mr) {
- err = -ENOMEM;
- goto out_err;
- }
- }
-
- /* Create memory aligned chunks */
- for (i = 0; i < mr_row_size / mr_req_size; i++) {
- chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
- if (!chunk) {
- err = -ENOMEM;
- goto out_err;
- }
-
- chunk->bucket = bucket;
- chunk->rkey = icm_mr->mkey.key;
- /* mr start addr is zero based */
- chunk->mr_addr = icm_mr->used_length;
- chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
- icm_mr->used_length += mr_req_size;
- chunk->num_of_entries = bucket->num_of_entries;
- chunk->byte_size = chunk->num_of_entries * bucket->entry_size;
-
- if (pool->icm_type == DR_ICM_TYPE_STE) {
- err = dr_icm_chunk_ste_init(chunk);
- if (err)
- goto out_free_chunk;
- }
-
- INIT_LIST_HEAD(&chunk->chunk_list);
- list_add(&chunk->chunk_list, &bucket->free_list);
- bucket->free_list_count++;
- bucket->total_chunks++;
- }
- mutex_unlock(&pool->mr_mutex);
- return 0;
-
-out_free_chunk:
- kvfree(chunk);
-out_err:
- mutex_unlock(&pool->mr_mutex);
- return err;
-}
-
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
kvfree(chunk->miss_list);
@@ -277,166 +169,199 @@ static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
kvfree(chunk->ste_arr);
}
-static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
+static enum mlx5dr_icm_type
+get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
+{
+ return chunk->buddy_mem->pool->icm_type;
+}
+
+static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
+ struct mlx5dr_icm_buddy_mem *buddy)
{
- struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+ enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
+ buddy->used_memory -= chunk->byte_size;
list_del(&chunk->chunk_list);
- bucket->total_chunks--;
- if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
+ if (icm_type == DR_ICM_TYPE_STE)
dr_icm_chunk_ste_cleanup(chunk);
kvfree(chunk);
}
-static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *bucket,
- enum mlx5dr_icm_chunk_size chunk_size)
+static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
- if (pool->icm_type == DR_ICM_TYPE_STE)
- bucket->entry_size = DR_STE_SIZE;
- else
- bucket->entry_size = DR_MODIFY_ACTION_SIZE;
-
- bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
- bucket->pool = pool;
- mutex_init(&bucket->mutex);
- INIT_LIST_HEAD(&bucket->free_list);
- INIT_LIST_HEAD(&bucket->used_list);
- INIT_LIST_HEAD(&bucket->hot_list);
- INIT_LIST_HEAD(&bucket->sync_list);
+ struct mlx5dr_icm_buddy_mem *buddy;
+ struct mlx5dr_icm_mr *icm_mr;
+
+ icm_mr = dr_icm_pool_mr_create(pool);
+ if (!icm_mr)
+ return -ENOMEM;
+
+ buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ goto free_mr;
+
+ if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
+ goto err_free_buddy;
+
+ buddy->icm_mr = icm_mr;
+ buddy->pool = pool;
+
+	/* Add it to the start of the list so that it is searched first */
+ list_add(&buddy->list_node, &pool->buddy_mem_list);
+
+ return 0;
+
+err_free_buddy:
+ kvfree(buddy);
+free_mr:
+ dr_icm_pool_mr_destroy(icm_mr);
+ return -ENOMEM;
}
-static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
+static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
struct mlx5dr_icm_chunk *chunk, *next;
- mutex_destroy(&bucket->mutex);
- list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
- list_splice_tail_init(&bucket->hot_list, &bucket->free_list);
+ list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
+ dr_icm_chunk_destroy(chunk, buddy);
+
+ list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
+ dr_icm_chunk_destroy(chunk, buddy);
- list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
- dr_icm_chunk_destroy(chunk);
+ dr_icm_pool_mr_destroy(buddy->icm_mr);
- WARN_ON(bucket->total_chunks != 0);
+ mlx5dr_buddy_cleanup(buddy);
- /* Cleanup of unreturned chunks */
- list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
- dr_icm_chunk_destroy(chunk);
+ kvfree(buddy);
}
-static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
+static struct mlx5dr_icm_chunk *
+dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+ unsigned int seg)
{
- u64 hot_size = 0;
- int chunk_order;
+ struct mlx5dr_icm_chunk *chunk;
+ int offset;
- for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
- hot_size += pool->buckets[chunk_order].hot_list_count *
- mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);
+ chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return NULL;
- return hot_size;
-}
+ offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
+
+ chunk->rkey = buddy_mem_pool->icm_mr->mkey.key;
+ chunk->mr_addr = offset;
+ chunk->icm_addr =
+ (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
+ chunk->num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+ chunk->byte_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
+ chunk->seg = seg;
+
+ if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
+ mlx5dr_err(pool->dmn,
+ "Failed to init ste arrays (order: %d)\n",
+ chunk_size);
+ goto out_free_chunk;
+ }
-static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *bucket)
-{
- u64 bytes_for_sync;
+ buddy_mem_pool->used_memory += chunk->byte_size;
+ chunk->buddy_mem = buddy_mem_pool;
+ INIT_LIST_HEAD(&chunk->chunk_list);
- bytes_for_sync = dr_icm_hot_mem_size(pool);
- if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
- return false;
+	/* The chunk is now part of the used_list */
+ list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
- return true;
-}
+ return chunk;
-static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
-{
- list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
- bucket->sync_list_count += bucket->hot_list_count;
- bucket->hot_list_count = 0;
+out_free_chunk:
+ kvfree(chunk);
+ return NULL;
}
-static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
+static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
- bucket->free_list_count += bucket->sync_list_count;
- bucket->sync_list_count = 0;
-}
+ if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
+ return true;
-static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
-{
- list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
- bucket->hot_list_count += bucket->sync_list_count;
- bucket->sync_list_count = 0;
+ return false;
}
-static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *cb,
- bool buckets[DR_CHUNK_SIZE_MAX])
+static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
- struct mlx5dr_icm_bucket *bucket;
- int i;
-
- for (i = 0; i < pool->num_of_buckets; i++) {
- bucket = &pool->buckets[i];
- if (bucket == cb) {
- dr_icm_chill_bucket_start(bucket);
- continue;
- }
+ struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ int err;
- /* Freeing the mutex is done at the end of that process, after
- * sync_ste was executed at dr_icm_chill_buckets_end func.
- */
- if (mutex_trylock(&bucket->mutex)) {
- dr_icm_chill_bucket_start(bucket);
- buckets[i] = true;
- }
+ err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
+ if (err) {
+ mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
+ return err;
}
-}
-static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *cb,
- bool buckets[DR_CHUNK_SIZE_MAX])
-{
- struct mlx5dr_icm_bucket *bucket;
- int i;
-
- for (i = 0; i < pool->num_of_buckets; i++) {
- bucket = &pool->buckets[i];
- if (bucket == cb) {
- dr_icm_chill_bucket_end(bucket);
- continue;
- }
+ list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
+ struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
- if (!buckets[i])
- continue;
+ list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
+ mlx5dr_buddy_free_mem(buddy, chunk->seg,
+ ilog2(chunk->num_of_entries));
+ pool->hot_memory_size -= chunk->byte_size;
+ dr_icm_chunk_destroy(chunk, buddy);
+ }
- dr_icm_chill_bucket_end(bucket);
- mutex_unlock(&bucket->mutex);
+ if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_buddy_destroy(buddy);
}
+
+ return 0;
}
-static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *cb,
- bool buckets[DR_CHUNK_SIZE_MAX])
+static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem **buddy,
+ unsigned int *seg)
{
- struct mlx5dr_icm_bucket *bucket;
- int i;
-
- for (i = 0; i < pool->num_of_buckets; i++) {
- bucket = &pool->buckets[i];
- if (bucket == cb) {
- dr_icm_chill_bucket_abort(bucket);
- continue;
- }
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
+ bool new_mem = false;
+ int err;
- if (!buckets[i])
- continue;
+alloc_buddy_mem:
+	/* Find the next free segment in the existing buddy allocators */
+ list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
+ err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
+ chunk_size, seg);
+ if (!err)
+ goto found;
+
+ if (WARN_ON(new_mem)) {
+			/* A buddy was just created for this request and
+			 * still could not satisfy it - out of memory.
+			 */
+ mlx5dr_err(pool->dmn,
+ "No memory for order: %d\n",
+ chunk_size);
+ goto out;
+ }
+ }
- dr_icm_chill_bucket_abort(bucket);
- mutex_unlock(&bucket->mutex);
+	/* No more space in the existing allocators, create a new buddy */
+ err = dr_icm_buddy_create(pool);
+ if (err) {
+ mlx5dr_err(pool->dmn,
+ "Failed creating buddy for order %d\n",
+ chunk_size);
+ goto out;
}
+
+	/* Mark that new memory (first in the list) was added, then retry */
+ new_mem = true;
+ goto alloc_buddy_mem;
+
+found:
+ *buddy = buddy_mem_pool;
+out:
+ return err;
}
/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
@@ -446,68 +371,48 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size)
{
- struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */
- bool buckets[DR_CHUNK_SIZE_MAX] = {};
- struct mlx5dr_icm_bucket *bucket;
- int err;
+ struct mlx5dr_icm_chunk *chunk = NULL;
+ struct mlx5dr_icm_buddy_mem *buddy;
+ unsigned int seg;
+ int ret;
if (chunk_size > pool->max_log_chunk_sz)
return NULL;
- bucket = &pool->buckets[chunk_size];
-
- mutex_lock(&bucket->mutex);
-
- /* Take chunk from pool if available, otherwise allocate new chunks */
- if (list_empty(&bucket->free_list)) {
- if (dr_icm_reuse_hot_entries(pool, bucket)) {
- dr_icm_chill_buckets_start(pool, bucket, buckets);
- err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
- if (err) {
- dr_icm_chill_buckets_abort(pool, bucket, buckets);
- mlx5dr_err(pool->dmn, "Sync_steering failed\n");
- chunk = NULL;
- goto out;
- }
- dr_icm_chill_buckets_end(pool, bucket, buckets);
- } else {
- dr_icm_chunks_create(bucket);
- }
- }
+ mutex_lock(&pool->mutex);
+	/* Find memory; get back the relevant buddy pool and segment in it */
+ ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
+ if (ret)
+ goto out;
- if (!list_empty(&bucket->free_list)) {
- chunk = list_last_entry(&bucket->free_list,
- struct mlx5dr_icm_chunk,
- chunk_list);
- if (chunk) {
- list_del_init(&chunk->chunk_list);
- list_add_tail(&chunk->chunk_list, &bucket->used_list);
- bucket->free_list_count--;
- bucket->used_list_count++;
- }
- }
+ chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
+ if (!chunk)
+ goto out_err;
+
+ goto out;
+
+out_err:
+ mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
- mutex_unlock(&bucket->mutex);
+ mutex_unlock(&pool->mutex);
return chunk;
}
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
- struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ struct mlx5dr_icm_pool *pool = buddy->pool;
- if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
- memset(chunk->ste_arr, 0,
- bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
- memset(chunk->hw_ste_arr, 0,
- bucket->num_of_entries * DR_STE_SIZE_REDUCED);
- }
+	/* Move the memory to the waiting ("hot") list */
+ mutex_lock(&pool->mutex);
+ list_move_tail(&chunk->chunk_list, &buddy->hot_list);
+ pool->hot_memory_size += chunk->byte_size;
+
+ /* Check if we have chunks that are waiting for sync-ste */
+ if (dr_icm_pool_is_sync_required(pool))
+ dr_icm_pool_sync_all_buddy_pools(pool);
- mutex_lock(&bucket->mutex);
- list_del_init(&chunk->chunk_list);
- list_add_tail(&chunk->chunk_list, &bucket->hot_list);
- bucket->hot_list_count++;
- bucket->used_list_count--;
- mutex_unlock(&bucket->mutex);
+ mutex_unlock(&pool->mutex);
}
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
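
The rework trades the per-size buckets for one buddy-backed pool with a single pool-wide policy: freed chunks are parked on their buddy's hot list because the hardware may still hold cached references to them, and only once hot_memory_size crosses DR_ICM_SYNC_THRESHOLD_POOL (64 MB) does a single mlx5dr_cmd_sync_steering() invalidate those caches and release every hot chunk back to its buddy. One sync is amortized over many frees, and one pool mutex replaces the per-bucket locking and trylock choreography of the removed chill_buckets code.
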
@@ -515,7 +420,6 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
{
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_icm_pool *pool;
- int i;
if (icm_type == DR_ICM_TYPE_STE)
max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
@@ -526,43 +430,24 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
if (!pool)
return NULL;
- pool->buckets = kcalloc(max_log_chunk_sz + 1,
- sizeof(pool->buckets[0]),
- GFP_KERNEL);
- if (!pool->buckets)
- goto free_pool;
-
pool->dmn = dmn;
pool->icm_type = icm_type;
pool->max_log_chunk_sz = max_log_chunk_sz;
- pool->num_of_buckets = max_log_chunk_sz + 1;
- INIT_LIST_HEAD(&pool->icm_mr_list);
- for (i = 0; i < pool->num_of_buckets; i++)
- dr_icm_bucket_init(pool, &pool->buckets[i], i);
+ INIT_LIST_HEAD(&pool->buddy_mem_list);
- mutex_init(&pool->mr_mutex);
+ mutex_init(&pool->mutex);
return pool;
-
-free_pool:
- kvfree(pool);
- return NULL;
}
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
- struct mlx5dr_icm_mr *icm_mr, *next;
- int i;
-
- mutex_destroy(&pool->mr_mutex);
-
- list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
- dr_icm_pool_mr_destroy(icm_mr);
+ struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
- for (i = 0; i < pool->num_of_buckets; i++)
- dr_icm_bucket_cleanup(&pool->buckets[i]);
+ list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
+ dr_icm_buddy_destroy(buddy);
- kfree(pool->buckets);
+ mutex_destroy(&pool->mutex);
kvfree(pool);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 7df883686d46..cb5202e17856 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -85,7 +85,7 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
(_misc2)._inner_outer##_first_mpls_s_bos || \
(_misc2)._inner_outer##_first_mpls_ttl)
-static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
+static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
{
return (misc->gre_key_h || misc->gre_key_l ||
misc->gre_protocol || misc->gre_c_present ||
@@ -98,12 +98,12 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
(_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \
(_misc2).outer_first_mpls_over_##gre_udp##_ttl)
-#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \
+#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
static bool
-dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
+dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->outer_vxlan_gpe_vni ||
misc3->outer_vxlan_gpe_next_protocol ||
@@ -111,21 +111,20 @@ dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
}
static bool
-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols &
- MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+ return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
}
static bool
-dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn)
+dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
{
- return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) &&
- dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps);
+ return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
+ dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
}
-static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
+static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
{
return misc->geneve_vni ||
misc->geneve_oam ||
@@ -134,26 +133,46 @@ static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
}
static bool
-dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps)
+dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols &
- MLX5_FLEX_PARSER_GENEVE_ENABLED;
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED;
}
static bool
-dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn)
+dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
{
- return dr_mask_is_misc_geneve_set(&mask->misc) &&
- dr_matcher_supp_flex_parser_geneve(&dmn->info.caps);
+ return dr_mask_is_tnl_geneve_set(&mask->misc) &&
+ dr_matcher_supp_tnl_geneve(&dmn->info.caps);
}
-static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
+static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
+}
+
+static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
+}
+
+static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->icmpv6_type || misc3->icmpv6_code ||
misc3->icmpv6_header_data);
}
+static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
+ return dr_matcher_supp_icmp_v4(&dmn->info.caps);
+ else if (dr_mask_is_icmpv6_set(&mask->misc3))
+ return dr_matcher_supp_icmp_v6(&dmn->info.caps);
+
+ return false;
+}
+
static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
{
return misc2->metadata_reg_a;
@@ -257,7 +276,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
+ mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
inner, rx);
}
@@ -277,8 +296,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
@@ -289,14 +308,12 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
}
- if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn))
- mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++],
- &mask,
- inner, rx);
- else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn))
- mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++],
- &mask,
- inner, rx);
+ if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
+ mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
+ inner, rx);
+ else if (dr_mask_is_tnl_geneve(&mask, dmn))
+ mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
+ inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
@@ -304,22 +321,18 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
- if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
- mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
- inner, rx);
+ if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
+ mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
- if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) &&
- mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
- (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
- mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
- ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++],
- &mask, &dmn->info.caps,
- inner, rx);
+ if (dr_mask_is_icmp(&mask, dmn)) {
+ ret = mlx5dr_ste_build_icmp(&sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
if (ret)
return ret;
}
- if (dr_mask_is_gre_set(&mask.misc))
- mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx);
+ if (dr_mask_is_tnl_gre_set(&mask.misc))
+ mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
}
/* Inner */
@@ -334,7 +347,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
+ mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
&mask, inner, rx);
}
@@ -354,8 +367,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
@@ -372,8 +385,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
- if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
- mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx);
+ if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
+ mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index b01aaec75622..d275823bff2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -1090,7 +1090,7 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
@@ -1594,9 +1594,9 @@ static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
@@ -1693,8 +1693,8 @@ static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask, bool inner, bool rx)
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask, bool inner, bool rx)
{
dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
@@ -1771,9 +1771,9 @@ static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
@@ -1792,8 +1792,8 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
u8 *bit_mask)
{
+ bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
- bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
u32 icmp_header_data_mask;
u32 icmp_type_mask;
u32 icmp_code_mask;
@@ -1869,7 +1869,7 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
u32 icmp_code;
bool is_ipv4;
- is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) {
icmp_header_data = misc_3->icmpv4_header_data;
icmp_type = misc_3->icmpv4_type;
@@ -1928,10 +1928,10 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx)
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
{
int ret;
@@ -2069,9 +2069,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
sb->bit_mask);
@@ -2122,9 +2122,9 @@ dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index f50f3b107aa3..3e423c8ed22f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -114,7 +114,7 @@ enum mlx5dr_ipv {
struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
-struct mlx5dr_icm_bucket;
+struct mlx5dr_icm_buddy_mem;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
@@ -288,7 +288,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
@@ -312,31 +312,31 @@ void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@@ -588,9 +588,9 @@ struct mlx5dr_match_param {
struct mlx5dr_match_misc3 misc3;
};
-#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
- (_misc3)->icmpv4_code || \
- (_misc3)->icmpv4_header_data)
+#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
+ (_misc3)->icmpv4_code || \
+ (_misc3)->icmpv4_header_data)
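
The renamed mask check is a plain any-field-set test over the three ICMPv4 mask fields, taking the misc3 pointer directly, which is what allows the declaration reorder earlier in this patch. A standalone sketch of its behaviour, with the struct reduced to just the fields the macro reads:

#include <stdio.h>

struct misc3 {
        unsigned int icmpv4_type;
        unsigned int icmpv4_code;
        unsigned int icmpv4_header_data;
};

#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
                                       (_misc3)->icmpv4_code || \
                                       (_misc3)->icmpv4_header_data)

int main(void)
{
        struct misc3 mask = { .icmpv4_code = 0xff };

        /* any non-zero ICMPv4 mask field selects the IPv4 flavour */
        printf("icmpv4: %s\n", DR_MASK_IS_ICMPV4_SET(&mask) ? "yes" : "no");
        return 0;
}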
struct mlx5dr_esw_caps {
u64 drop_icm_address_rx;
@@ -731,7 +731,6 @@ struct mlx5dr_action {
struct mlx5dr_domain *dmn;
struct mlx5dr_icm_chunk *chunk;
u8 *data;
- u32 data_size;
u16 num_of_actions;
u32 index;
u8 allow_rx:1;
@@ -804,7 +803,7 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
struct mlx5dr_ste *ste);
struct mlx5dr_icm_chunk {
- struct mlx5dr_icm_bucket *bucket;
+ struct mlx5dr_icm_buddy_mem *buddy_mem;
struct list_head chunk_list;
u32 rkey;
u32 num_of_entries;
@@ -812,6 +811,11 @@ struct mlx5dr_icm_chunk {
u64 icm_addr;
u64 mr_addr;
+ /* Indicates the index of this chunk in the whole memory,
+ * used for deleting the chunk from its buddy allocator.
+ */
+ unsigned int seg;
+
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
u8 *hw_ste_arr;
@@ -840,23 +844,20 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
-static inline int
-mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
-{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
-}
-
-static inline int
-mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
-{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
-}
-
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
enum mlx5dr_ipv inner_ipv);
+static inline int
+mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
+{
+ if (icm_type == DR_ICM_TYPE_STE)
+ return DR_STE_SIZE;
+
+ return DR_MODIFY_ACTION_SIZE;
+}
+
static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
{
@@ -870,11 +871,7 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
int num_of_entries;
int entry_size;
- if (icm_type == DR_ICM_TYPE_STE)
- entry_size = DR_STE_SIZE;
- else
- entry_size = DR_MODIFY_ACTION_SIZE;
-
+ entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
return entry_size * num_of_entries;
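
Factoring the type-to-entry-size mapping into mlx5dr_icm_pool_dm_type_to_entry_size() lets other call sites reuse it; the byte size of a chunk is then just entry size times entry count. A standalone sketch of that computation, with illustrative sizes and the usual order-based chunk sizing assumed (the real constants live in the driver headers):

#include <stdio.h>

enum icm_type { ICM_TYPE_STE, ICM_TYPE_MODIFY_ACTION };

#define STE_SIZE 64             /* illustrative sizes, not the HW values */
#define MODIFY_ACTION_SIZE 8

static int icm_type_to_entry_size(enum icm_type t)
{
        return t == ICM_TYPE_STE ? STE_SIZE : MODIFY_ACTION_SIZE;
}

static unsigned int chunk_size_to_entries(unsigned int chunk_size_order)
{
        return 1u << chunk_size_order;  /* order-based chunk sizing */
}

int main(void)
{
        /* e.g. an order-3 STE chunk: 8 entries * 64 bytes = 512 bytes */
        printf("%u bytes\n",
               icm_type_to_entry_size(ICM_TYPE_STE) *
               chunk_size_to_entries(3));
        return 0;
}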
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7914fe3fc68d..4177786b8eaf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -127,4 +127,36 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
}
+/* buddy functions & structure */
+
+struct mlx5dr_icm_mr;
+
+struct mlx5dr_icm_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+ struct list_head list_node;
+ struct mlx5dr_icm_mr *icm_mr;
+ struct mlx5dr_icm_pool *pool;
+
+ /* This is the list of used chunks. HW may be accessing this memory */
+ struct list_head used_list;
+ u64 used_memory;
+
+ /* Hardware may still be accessing this memory; at some future,
+ * undetermined time, it will stop doing so.
+ * The sync_ste command sets these chunks free.
+ */
+ struct list_head hot_list;
+};
+
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int max_order);
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy);
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int order,
+ unsigned int *segment);
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int seg, unsigned int order);
+
#endif /* _MLX5DR_H_ */
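
To make the new buddy API concrete: below is a toy allocator with the same shape as struct mlx5dr_icm_buddy_mem, one free-bitmap and free-count per order, splitting on allocation and coalescing on free, with the segment returned in minimum-block units (this is the value the new seg field in mlx5dr_icm_chunk records). An illustrative sketch only; arena size, bit scanning, and locking are all simplified relative to the driver:

#include <stdio.h>
#include <string.h>

#define MAX_ORDER 4                     /* arena of 1 << MAX_ORDER unit blocks */

static unsigned long bitmap[MAX_ORDER + 1];     /* bit i set => block i free */
static unsigned int num_free[MAX_ORDER + 1];

static void buddy_init(void)
{
        memset(bitmap, 0, sizeof(bitmap));
        memset(num_free, 0, sizeof(num_free));
        bitmap[MAX_ORDER] = 1;          /* one free block spanning the arena */
        num_free[MAX_ORDER] = 1;
}

static int first_free(unsigned int order)
{
        unsigned int nbits = 1u << (MAX_ORDER - order);

        for (unsigned int i = 0; i < nbits; i++)
                if (bitmap[order] & (1ul << i))
                        return (int)i;
        return -1;
}

static int buddy_alloc(unsigned int order, unsigned int *segment)
{
        unsigned int o;
        int seg = -1;

        for (o = order; o <= MAX_ORDER; o++) {
                seg = first_free(o);
                if (seg >= 0)
                        break;
        }
        if (seg < 0)
                return -1;              /* no free block big enough */

        bitmap[o] &= ~(1ul << seg);
        num_free[o]--;
        while (o > order) {             /* split, freeing the upper buddy */
                o--;
                seg <<= 1;
                bitmap[o] |= 1ul << (seg ^ 1);
                num_free[o]++;
        }
        *segment = (unsigned int)seg << order;  /* offset in unit blocks */
        return 0;
}

static void buddy_free(unsigned int segment, unsigned int order)
{
        unsigned int seg = segment >> order;

        /* coalesce upward while the buddy block is also free */
        while (order < MAX_ORDER && (bitmap[order] & (1ul << (seg ^ 1)))) {
                bitmap[order] &= ~(1ul << (seg ^ 1));
                num_free[order]--;
                seg >>= 1;
                order++;
        }
        bitmap[order] |= 1ul << seg;
        num_free[order]++;
}

int main(void)
{
        unsigned int a, b;

        buddy_init();
        buddy_alloc(1, &a);     /* 2-unit chunk, e.g. a = 0 */
        buddy_alloc(0, &b);     /* 1-unit chunk, e.g. b = 2 */
        printf("a=%u b=%u\n", a, b);
        buddy_free(b, 0);
        buddy_free(a, 1);       /* arena coalesces back to one block */
        return 0;
}

Splitting produces the buddy at seg ^ 1 one order down; freeing walks back up while that buddy is free, which is why the allocator only needs the (seg, order) pair that mlx5dr_buddy_free_mem() takes.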
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 39eff6a57ba2..fcf9095b3f55 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -7279,10 +7279,11 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 dip)
+ u32 *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- mlxsw_reg_ralue_dip4_set(payload, dip);
+ if (dip)
+ mlxsw_reg_ralue_dip4_set(payload, *dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7292,7 +7293,8 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ if (dip)
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
static inline void
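
Making dip a pointer in mlxsw_reg_ralue_pack4() (and guarding dip in the pack6 variant) lets a caller pack the route header without writing an address, which the new code paths need since they supply the address through a different layer. A minimal sketch of the optional-argument-by-pointer idiom, with hypothetical names:

#include <stdio.h>

struct payload {
        unsigned int header;
        unsigned int dip;
};

/* dip is optional: NULL packs only the common header fields */
static void pack4(struct payload *p, unsigned int header,
                  const unsigned int *dip)
{
        p->header = header;
        if (dip)
                p->dip = *dip;
}

int main(void)
{
        struct payload p = { 0 };
        unsigned int dip = 0xc0a80001;  /* 192.168.0.1 */

        pack4(&p, 1, &dip);     /* full pack */
        pack4(&p, 2, NULL);     /* header-only pack, dip left as-is */
        printf("header=%u dip=%x\n", p.header, p.dip);
        return 0;
}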
@@ -8245,6 +8247,86 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
+/* Note that the XRALXX register positions violate the rule of ordering
+ * register definitions by ID. However, the XRALXX pack helpers call the
+ * RALXX pack helpers, and the RALXX registers have higher IDs, so the
+ * XRALXX registers are defined after them.
+ */
+
+/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
+ * -----------------------------------------------------------
+ * The XRALTA is used to allocate the XLT LPM trees.
+ *
+ * This register embeds original RALTA register.
+ */
+#define MLXSW_REG_XRALTA_ID 0x7811
+#define MLXSW_REG_XRALTA_LEN 0x08
+#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
+
+static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
+
+ MLXSW_REG_ZERO(xralta, payload);
+ mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
+}
+
+/* XRALST - XM Router Algorithmic LPM Structure Tree Register
+ * ----------------------------------------------------------
+ * The XRALST is used to set and query the structure of an XLT LPM tree.
+ *
+ * This register embeds original RALST register.
+ */
+#define MLXSW_REG_XRALST_ID 0x7812
+#define MLXSW_REG_XRALST_LEN 0x108
+#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
+
+static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ MLXSW_REG_ZERO(xralst, payload);
+ mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
+}
+
+static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
+ right_child_bin);
+}
+
+/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
+ * --------------------------------------------------------
+ * The XRALTB register is used to bind virtual router and protocol
+ * to an allocated LPM tree.
+ *
+ * This register embeds original RALTB register.
+ */
+#define MLXSW_REG_XRALTB_ID 0x7813
+#define MLXSW_REG_XRALTB_LEN 0x08
+#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
+
+static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
+
+ MLXSW_REG_ZERO(xraltb, payload);
+ mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+}
+
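
All three XRALxx helpers share one envelope pattern: zero the wrapper payload, then pack the unchanged legacy register at a fixed offset inside it. A standalone sketch with made-up lengths (the real ones are the MLXSW_REG_XRAL*_LEN and *_OFFSET constants above):

#include <stdio.h>
#include <string.h>

#define XREG_LEN        8
#define REG_OFFSET      4       /* legacy register embedded here */

/* stand-in for the legacy pack helper; real field layouts differ */
static void legacy_pack(char *payload, unsigned char tree_id)
{
        payload[0] = (char)tree_id;
}

/* envelope pack: zero the wrapper, pack the legacy register inside */
static void x_pack(char *payload, unsigned char tree_id)
{
        memset(payload, 0, XREG_LEN);
        legacy_pack(payload + REG_OFFSET, tree_id);
}

int main(void)
{
        char payload[XREG_LEN];

        x_pack(payload, 7);
        printf("embedded tree_id=%d\n", payload[REG_OFFSET]);
        return 0;
}

The payoff comes on the write side: a legacy backend can submit only payload + REG_OFFSET, while an XM backend can submit the whole envelope.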
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -11195,6 +11277,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
+ MLXSW_REG(xralta),
+ MLXSW_REG(xralst),
+ MLXSW_REG(xraltb),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index a8525992528f..089d99535f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -181,23 +181,26 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_ipip_fib_entry_op_gre4_ralue(struct mlxsw_sp *mlxsw_sp,
- u32 dip, u8 prefix_len, u16 ul_vr_id,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index)
+mlxsw_sp_ipip_fib_entry_op_gre4_do(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 dip, u8 prefix_len, u16 ul_vr_id,
+ enum mlxsw_sp_fib_entry_op op,
+ u32 tunnel_index,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-
- mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
- ul_vr_id, prefix_len, dip);
- mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, tunnel_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ ll_ops->fib_entry_pack(op_ctx, MLXSW_SP_L3_PROTO_IPV4, op, ul_vr_id,
+ prefix_len, (unsigned char *) &dip, priv);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx, tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index)
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ enum mlxsw_sp_fib_entry_op op, u32 tunnel_index,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb);
__be32 dip;
@@ -210,9 +213,8 @@ static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ipip_entry->ol_dev).addr4;
- return mlxsw_sp_ipip_fib_entry_op_gre4_ralue(mlxsw_sp, be32_to_cpu(dip),
- 32, ul_vr_id, op,
- tunnel_index);
+ return mlxsw_sp_ipip_fib_entry_op_gre4_do(mlxsw_sp, ll_ops, op_ctx, be32_to_cpu(dip),
+ 32, ul_vr_id, op, tunnel_index, priv);
}
static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
@@ -231,8 +233,7 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
}
static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ol_dev,
- enum mlxsw_sp_l3proto ol_proto)
+ const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
__be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index bb5c4d4a5872..d32702cb6ab4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -43,8 +43,7 @@ struct mlxsw_sp_ipip_ops {
struct mlxsw_sp_ipip_entry *ipip_entry);
bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ol_dev,
- enum mlxsw_sp_l3proto ol_proto);
+ const struct net_device *ol_dev);
/* Return a configuration for creating an overlay loopback RIF. */
struct mlxsw_sp_rif_ipip_lb_config
@@ -52,9 +51,12 @@ struct mlxsw_sp_ipip_ops {
const struct net_device *ol_dev);
int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index);
+ enum mlxsw_sp_fib_entry_op op,
+ u32 tunnel_index,
+ struct mlxsw_sp_fib_entry_priv *priv);
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4381f8c6c3fb..147dd8fab2af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -352,6 +352,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
+struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;
@@ -368,18 +369,71 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
+static struct mlxsw_sp_fib_entry_priv *
+mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
+{
+ struct mlxsw_sp_fib_entry_priv *priv;
+
+ if (!ll_ops->fib_entry_priv_size)
+ /* No need to have priv */
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&priv->refcnt, 1);
+ return priv;
+}
+
+static void
+mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ kfree(priv);
+}
+
+static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ refcount_inc(&priv->refcnt);
+}
+
+static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ if (!priv || !refcount_dec_and_test(&priv->refcnt))
+ return;
+ mlxsw_sp_fib_entry_priv_destroy(priv);
+}
+
+static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry_priv *priv)
+{
+ if (!priv)
+ return;
+ mlxsw_sp_fib_entry_priv_hold(priv);
+ list_add(&priv->list, &op_ctx->fib_entry_priv_list);
+}
+
+static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ struct mlxsw_sp_fib_entry_priv *priv, *tmp;
+
+ list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
+ mlxsw_sp_fib_entry_priv_put(priv);
+ INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
+
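
The priv object is a refcounted, variably sized blob that a backend can hang off each FIB entry; a backend that declares fib_entry_priv_size == 0 gets no allocation at all, and a reference is taken while an operation context holds the priv. A userspace sketch of the lifecycle, with a plain int standing in for refcount_t and NULL for the ERR_PTR error returns:

#include <stdio.h>
#include <stdlib.h>

struct priv {
        int refcnt;             /* refcount_t in the kernel */
        char data[];            /* backend-specific payload */
};

static struct priv *priv_create(size_t priv_size)
{
        struct priv *p;

        if (!priv_size)
                return NULL;    /* backend needs no priv: allocate nothing */

        p = calloc(1, sizeof(*p) + priv_size);
        if (!p)
                return NULL;    /* kernel code returns ERR_PTR(-ENOMEM) */
        p->refcnt = 1;
        return p;
}

static void priv_hold(struct priv *p)
{
        if (p)
                p->refcnt++;
}

static void priv_put(struct priv *p)
{
        if (!p || --p->refcnt)
                return;
        free(p);
}

int main(void)
{
        struct priv *p = priv_create(16);

        priv_hold(p);   /* op context takes a reference while packing */
        priv_put(p);    /* released once the write is committed */
        priv_put(p);    /* final put frees the object */
        printf("done\n");
        return 0;
}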
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
+ struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
+ struct fib_info *fi;
u32 tb_id;
- u32 prio;
u8 tos;
u8 type;
};
@@ -409,6 +463,7 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
+ const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -422,12 +477,31 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
+static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
+ xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
+ xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
+}
+
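
These three helpers are the "basic" (legacy) implementations behind a small per-protocol ops table: callers always build the XRALxx envelope, and this backend writes only the embedded RALxx portion by offsetting into the payload. A sketch of the dispatch pattern, with hypothetical names standing in for the router_ll_ops plumbing:

#include <stdio.h>

#define XREG_LEN        8
#define REG_OFFSET      4       /* legacy register embedded at this offset */

struct ll_ops {
        int (*tree_write)(const char *payload);
};

/* legacy backend: strip the envelope, write the embedded register */
static int basic_tree_write(const char *payload)
{
        printf("writing legacy reg, first byte %d\n", payload[REG_OFFSET]);
        return 0;
}

static const struct ll_ops basic_ops = { .tree_write = basic_tree_write };

/* one ops table per protocol, as with router->proto_ll_ops[proto] */
static const struct ll_ops *proto_ll_ops[] = { &basic_ops, &basic_ops };

int main(void)
{
        char payload[XREG_LEN] = { [REG_OFFSET] = 7 };

        return proto_ll_ops[0]->tree_write(payload);
}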
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
@@ -443,6 +517,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
+ fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -481,33 +556,36 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -515,19 +593,20 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -538,12 +617,11 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
- lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -554,14 +632,15 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -569,6 +648,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -582,7 +662,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -593,8 +673,11 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops =
+ mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
+
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -684,23 +767,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -1270,21 +1353,33 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
-mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ul_dev,
+mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
enum mlxsw_sp_l3proto ul_proto,
union mlxsw_sp_l3addr ul_dip)
{
- struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ struct net_device *ul_dev;
+
+ rcu_read_lock();
+
+ ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
+ if (!ul_dev)
+ goto out_unlock;
list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
ipip_list_node)
if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
ul_proto, ul_dip,
ipip_entry))
- return ipip_entry;
+ goto out_unlock;
+
+ rcu_read_unlock();
return NULL;
+
+out_unlock:
+ rcu_read_unlock();
+ return ipip_entry;
}
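
The lookup now takes an ifindex rather than a device pointer and resolves the device itself under rcu_read_lock(), so the device cannot disappear while the ipip list is walked; note the single pair of unlock paths reached via goto. A compile-only sketch of the control flow, with the RCU primitives stubbed out and the list reduced to one entry:

#include <stdio.h>
#include <stddef.h>

/* stubs so the sketch builds standalone; the real code uses RCU */
#define rcu_read_lock()         do { } while (0)
#define rcu_read_unlock()       do { } while (0)

struct net_device { int ifindex; };
struct entry { struct net_device *dev; struct entry *next; };

static struct net_device dev3 = { .ifindex = 3 };
static struct entry e1 = { .dev = &dev3 };
static struct entry *entries = &e1;

static struct net_device *dev_get_by_index_stub(int ifindex)
{
        return ifindex == dev3.ifindex ? &dev3 : NULL;
}

/* resolve the device and walk the list under one read-side section */
static struct entry *find_by_ifindex(int ifindex)
{
        struct entry *found = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_stub(ifindex);
        if (!dev)
                goto out_unlock;

        for (struct entry *e = entries; e; e = e->next)
                if (e->dev == dev) {
                        found = e;
                        goto out_unlock;
                }

out_unlock:
        rcu_read_unlock();
        return found;
}

int main(void)
{
        printf("found entry: %s\n", find_by_ifindex(3) ? "yes" : "no");
        return 0;
}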
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1370,11 +1465,7 @@ static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ops
= mlxsw_sp->router->ipip_ops_arr[ipipt];
- /* For deciding whether decap should be offloaded, we don't care about
- * overlay protocol, so ask whether either one is supported.
- */
- return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
- ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
+ return ops->can_offload(mlxsw_sp, ol_dev);
}
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
@@ -2749,10 +2840,11 @@ struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head rif_list_node;
struct list_head router_list_node;
- struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
- * this belongs to
- */
+ struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
+ * this nexthop belongs to
+ */
struct rhash_head ht_node;
+ struct neigh_table *neigh_tbl;
struct mlxsw_sp_nexthop_key key;
unsigned char gw_addr[sizeof(struct in6_addr)];
int ifindex;
@@ -2778,21 +2870,36 @@ struct mlxsw_sp_nexthop {
bool counter_valid;
};
-struct mlxsw_sp_nexthop_group {
- void *priv;
- struct rhash_head ht_node;
- struct list_head fib_list; /* list of fib entries that use this group */
- struct neigh_table *neigh_tbl;
- u8 adj_index_valid:1,
- gateway:1; /* routes using the group use a gateway */
+enum mlxsw_sp_nexthop_group_type {
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
+};
+
+struct mlxsw_sp_nexthop_group_info {
+ struct mlxsw_sp_nexthop_group *nh_grp;
u32 adj_index;
u16 ecmp_size;
u16 count;
int sum_norm_weight;
+ u8 adj_index_valid:1,
+ gateway:1; /* routes using the group use a gateway */
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
+struct mlxsw_sp_nexthop_group {
+ struct rhash_head ht_node;
+ struct list_head fib_list; /* list of fib entries that use this group */
+ union {
+ struct {
+ struct fib_info *fi;
+ } ipv4;
+ };
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ enum mlxsw_sp_nexthop_group_type type;
+ bool can_destroy;
+};
+
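
The struct split above separates the group's stable identity (rhashtable node, fib list, type) from the nexthop payload, which gets its own allocation with a back-pointer; the payload can then be rebuilt when the nexthop set changes while everything that references the group stays valid. A minimal sketch of the layout, with invented field names where the diff does not show them:

#include <stdio.h>
#include <stdlib.h>

struct group;

/* payload rebuilt when the nexthop set changes */
struct group_info {
        struct group *grp;      /* back-pointer to the stable group */
        unsigned int count;
        int nexthops[];         /* flexible array, sized at allocation */
};

/* stable identity: hash-table node, fib list, type tag live here */
struct group {
        struct group_info *gi;
        int type;
};

static struct group_info *group_info_create(struct group *grp,
                                            unsigned int count)
{
        struct group_info *gi;

        gi = calloc(1, sizeof(*gi) + count * sizeof(gi->nexthops[0]));
        if (!gi)
                return NULL;
        gi->grp = grp;
        gi->count = count;
        return gi;
}

int main(void)
{
        struct group grp = { .type = 4 };

        grp.gi = group_info_create(&grp, 2);
        if (!grp.gi)
                return 1;

        /* the payload can later be replaced without touching the group */
        printf("count=%u type=%d\n", grp.gi->count, grp.type);
        free(grp.gi);
        return 0;
}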
void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
@@ -2858,18 +2965,18 @@ unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index)
{
- struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
u32 adj_hash_index = 0;
int i;
- if (!nh->offloaded || !nh_grp->adj_index_valid)
+ if (!nh->offloaded || !nhgi->adj_index_valid)
return -EINVAL;
- *p_adj_index = nh_grp->adj_index;
- *p_adj_size = nh_grp->ecmp_size;
+ *p_adj_index = nhgi->adj_index;
+ *p_adj_size = nhgi->ecmp_size;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
if (nh_iter == nh)
break;
@@ -2888,11 +2995,11 @@ struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
{
- struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
return true;
@@ -2900,14 +3007,8 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
return false;
}
-static struct fib_info *
-mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
-{
- return nh_grp->priv;
-}
-
struct mlxsw_sp_nexthop_group_cmp_arg {
- enum mlxsw_sp_l3proto proto;
+ enum mlxsw_sp_nexthop_group_type type;
union {
struct fib_info *fi;
struct mlxsw_sp_fib6_entry *fib6_entry;
@@ -2921,10 +3022,10 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
const struct mlxsw_sp_nexthop *nh;
- nh = &nh_grp->nexthops[i];
+ nh = &nh_grp->nhgi->nexthops[i];
if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
@@ -2939,7 +3040,7 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- if (nh_grp->count != fib6_entry->nrt6)
+ if (nh_grp->nhgi->count != fib6_entry->nrt6)
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
@@ -2964,10 +3065,13 @@ mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
- switch (cmp_arg->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
- case MLXSW_SP_L3_PROTO_IPV6:
+ if (nh_grp->type != cmp_arg->type)
+ return 1;
+
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ return cmp_arg->fi != nh_grp->ipv4.fi;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
cmp_arg->fib6_entry);
default:
@@ -2976,12 +3080,6 @@ mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
}
}
-static int
-mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
-{
- return nh_grp->neigh_tbl->family;
-}
-
static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
const struct mlxsw_sp_nexthop_group *nh_grp = data;
@@ -2990,14 +3088,14 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
unsigned int val;
int i;
- switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
- case AF_INET:
- fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ fi = nh_grp->ipv4.fi;
return jhash(&fi, sizeof(fi), seed);
- case AF_INET6:
- val = nh_grp->count;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ val = nh_grp->nhgi->count;
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ nh = &nh_grp->nhgi->nexthops[i];
val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
@@ -3031,10 +3129,10 @@ mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
- switch (cmp_arg->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
- case MLXSW_SP_L3_PROTO_IPV6:
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
default:
WARN_ON(1);
@@ -3052,8 +3150,8 @@ static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
- !nh_grp->gateway)
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
return 0;
return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
@@ -3064,8 +3162,8 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
- !nh_grp->gateway)
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
return;
rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
@@ -3079,7 +3177,7 @@ mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
- cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
cmp_arg.fi = fi;
return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
&cmp_arg,
@@ -3092,7 +3190,7 @@ mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
- cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
cmp_arg.fib6_entry = fib6_entry;
return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
&cmp_arg,
@@ -3151,14 +3249,16 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+
if (fib == fib_entry->fib_node->fib)
continue;
fib = fib_entry->fib_node->fib;
err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
old_adj_index,
old_ecmp_size,
- nh_grp->adj_index,
- nh_grp->ecmp_size);
+ nhgi->adj_index,
+ nhgi->ecmp_size);
if (err)
return err;
}
@@ -3229,15 +3329,15 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
bool reallocate)
{
- u32 adj_index = nh_grp->adj_index; /* base */
+ u32 adj_index = nhgi->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload) {
nh->offloaded = 0;
@@ -3337,13 +3437,13 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
+mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
{
int i, g = 0, sum_norm_weight = 0;
struct mlxsw_sp_nexthop *nh;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload)
continue;
@@ -3353,8 +3453,8 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
g = nh->nh_weight;
}
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload)
continue;
@@ -3362,18 +3462,18 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
sum_norm_weight += nh->norm_nh_weight;
}
- nh_grp->sum_norm_weight = sum_norm_weight;
+ nhgi->sum_norm_weight = sum_norm_weight;
}
static void
-mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
+mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
{
- int total = nh_grp->sum_norm_weight;
- u16 ecmp_size = nh_grp->ecmp_size;
int i, weight = 0, lower_bound = 0;
+ int total = nhgi->sum_norm_weight;
+ u16 ecmp_size = nhgi->ecmp_size;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
int upper_bound;
if (!nh->should_offload)
@@ -3395,8 +3495,8 @@ mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
if (nh->offloaded)
nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
@@ -3442,36 +3542,36 @@ static void
mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
- case AF_INET:
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
break;
- case AF_INET6:
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
break;
}
}
-static void
+static int
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
u16 ecmp_size, old_ecmp_size;
struct mlxsw_sp_nexthop *nh;
bool offload_change = false;
u32 adj_index;
bool old_adj_index_valid;
+ int i, err2, err = 0;
u32 old_adj_index;
- int i;
- int err;
- if (!nh_grp->gateway) {
+ if (!nhgi->gateway) {
mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- return;
+ return 0;
}
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (nh->should_offload != nh->offloaded) {
offload_change = true;
@@ -3483,21 +3583,21 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
}
- return;
+ return 0;
}
- mlxsw_sp_nexthop_group_normalize(nh_grp);
- if (!nh_grp->sum_norm_weight)
+ mlxsw_sp_nexthop_group_normalize(nhgi);
+ if (!nhgi->sum_norm_weight)
/* No neigh of this group is connected so we just set
* the trap and let everything flow through the kernel.
*/
goto set_trap;
- ecmp_size = nh_grp->sum_norm_weight;
+ ecmp_size = nhgi->sum_norm_weight;
err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
if (err)
/* No valid allocation size available. */
@@ -3512,14 +3612,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
goto set_trap;
}
- old_adj_index_valid = nh_grp->adj_index_valid;
- old_adj_index = nh_grp->adj_index;
- old_ecmp_size = nh_grp->ecmp_size;
- nh_grp->adj_index_valid = 1;
- nh_grp->adj_index = adj_index;
- nh_grp->ecmp_size = ecmp_size;
- mlxsw_sp_nexthop_group_rebalance(nh_grp);
- err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
+ old_adj_index_valid = nhgi->adj_index_valid;
+ old_adj_index = nhgi->adj_index;
+ old_ecmp_size = nhgi->ecmp_size;
+ nhgi->adj_index_valid = 1;
+ nhgi->adj_index = adj_index;
+ nhgi->ecmp_size = ecmp_size;
+ mlxsw_sp_nexthop_group_rebalance(nhgi);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -3536,7 +3636,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
goto set_trap;
}
- return;
+ return 0;
}
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
@@ -3548,22 +3648,23 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
- return;
+ return 0;
set_trap:
- old_adj_index_valid = nh_grp->adj_index_valid;
- nh_grp->adj_index_valid = 0;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ old_adj_index_valid = nhgi->adj_index_valid;
+ nhgi->adj_index_valid = 0;
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
nh->offloaded = 0;
}
- err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- if (err)
+ err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err2)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
- nh_grp->ecmp_size, nh_grp->adj_index);
+ nhgi->ecmp_size, nhgi->adj_index);
+ return err;
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -3589,10 +3690,9 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
nh = list_first_entry(&neigh_entry->nexthop_list,
struct mlxsw_sp_nexthop, neigh_list_node);
- n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (!n) {
- n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
- nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -3615,7 +3715,7 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
neigh_release(old_n);
neigh_clone(n);
__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
neigh_release(n);
@@ -3652,7 +3752,7 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -3683,7 +3783,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
u8 nud_state, dead;
int err;
- if (!nh->nh_grp->gateway || nh->neigh_entry)
+ if (!nh->nhgi->gateway || nh->neigh_entry)
return 0;
/* Take a reference of neigh here ensuring that neigh would
@@ -3691,10 +3791,9 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
* The reference is taken either in neigh_lookup() or
* in neigh_create() in case n is not found.
*/
- n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (!n) {
- n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
- nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -3775,7 +3874,7 @@ static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
{
bool removing;
- if (!nh->nh_grp->gateway || nh->ipip_entry)
+ if (!nh->nhgi->gateway || nh->ipip_entry)
return;
nh->ipip_entry = ipip_entry;
@@ -3807,27 +3906,11 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
}
-static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
-{
- switch (nh->type) {
- case MLXSW_SP_NEXTHOP_TYPE_ETH:
- mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_rif_fini(nh);
- break;
- case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- mlxsw_sp_nexthop_rif_fini(nh);
- mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
- break;
- }
-}
-
-static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh,
- struct fib_nh *fib_nh)
+static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ const struct net_device *dev)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct net_device *dev = fib_nh->fib_nh_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
@@ -3835,8 +3918,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
if (ipip_entry) {
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- if (ipip_ops->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4)) {
+ if (ipip_ops->can_offload(mlxsw_sp, dev)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
return 0;
@@ -3860,10 +3942,19 @@ err_neigh_init:
return err;
}
-static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
+ break;
+ }
}
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
@@ -3875,7 +3966,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct in_device *in_dev;
int err;
- nh->nh_grp = nh_grp;
+ nh->nhgi = nh_grp->nhgi;
nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight = fib_nh->fib_nh_weight;
@@ -3883,6 +3974,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
nh->nh_weight = 1;
#endif
memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
+ nh->neigh_tbl = &arp_tbl;
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
@@ -3892,6 +3984,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (!dev)
return 0;
+ nh->ifindex = dev->ifindex;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -3902,7 +3995,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
}
rcu_read_unlock();
- err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
if (err)
goto err_nexthop_neigh_init;
@@ -3916,7 +4009,7 @@ err_nexthop_neigh_init:
static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
@@ -3938,14 +4031,14 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
switch (event) {
case FIB_EVENT_NH_ADD:
- mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
+ mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
break;
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
break;
}
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
@@ -3968,7 +4061,7 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -3991,7 +4084,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -4004,45 +4097,94 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
+static int
+mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
+ nhgi->count = nhs;
+ for (i = 0; i < nhgi->count; i++) {
+ struct fib_nh *fib_nh;
+
+ nh = &nhgi->nexthops[i];
+ fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
+ err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop4_init;
+ }
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop4_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
- unsigned int nhs = fib_info_num_path(fi);
struct mlxsw_sp_nexthop_group *nh_grp;
- struct mlxsw_sp_nexthop *nh;
- struct fib_nh *fib_nh;
- int i;
int err;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
- nh_grp->priv = fi;
INIT_LIST_HEAD(&nh_grp->fib_list);
- nh_grp->neigh_tbl = &arp_tbl;
-
- nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
- nh_grp->count = nhs;
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
+ nh_grp->ipv4.fi = fi;
fib_info_hold(fi);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
- fib_nh = fib_info_nh(fi, i);
- err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
- if (err)
- goto err_nexthop4_init;
- }
+
+ err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_info_init;
+
err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
if (err)
goto err_nexthop_group_insert;
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+
+ nh_grp->can_destroy = true;
+
return nh_grp;
err_nexthop_group_insert:
-err_nexthop4_init:
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
- }
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
fib_info_put(fi);
kfree(nh_grp);
return ERR_PTR(err);
@@ -4052,17 +4194,11 @@ static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- struct mlxsw_sp_nexthop *nh;
- int i;
-
+ if (!nh_grp->can_destroy)
+ return;
mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
- }
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
- WARN_ON_ONCE(nh_grp->adj_index_valid);
- fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+ fib_info_put(nh_grp->ipv4.fi);
kfree(nh_grp);
}
@@ -4120,9 +4256,9 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return !!nh_group->adj_index_valid;
+ return !!nh_group->nhgi->adj_index_valid;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return !!nh_group->nh_rif;
+ return !!nh_group->nhgi->nh_rif;
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
@@ -4138,8 +4274,8 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
@@ -4156,7 +4292,6 @@ static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
int dst_len = fib_entry->fib_node->key.prefix_len;
struct mlxsw_sp_fib4_entry *fib4_entry;
@@ -4166,7 +4301,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- fri.fi = fi;
+ fri.fi = fib4_entry->fi;
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
@@ -4181,7 +4316,6 @@ static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
int dst_len = fib_entry->fib_node->key.prefix_len;
struct mlxsw_sp_fib4_entry *fib4_entry;
@@ -4189,7 +4323,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- fri.fi = fi;
+ fri.fi = fib4_entry->fi;
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
@@ -4264,13 +4398,14 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
switch (op) {
- case MLXSW_REG_RALUE_OP_WRITE_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ case MLXSW_SP_FIB_ENTRY_OP_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -4278,32 +4413,132 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
+struct mlxsw_sp_fib_entry_op_ctx_basic {
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+};
+
static void
-mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
- const struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_sp_l3proto proto,
+ enum mlxsw_sp_fib_entry_op op,
+ u16 virtual_router, u8 prefix_len,
+ unsigned char *addr,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
- enum mlxsw_reg_ralxx_protocol proto;
- u32 *p_dip;
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+ enum mlxsw_reg_ralxx_protocol ralxx_proto;
+ char *ralue_pl = op_ctx_basic->ralue_pl;
+ enum mlxsw_reg_ralue_op ralue_op;
- proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
+ ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
- switch (fib->proto) {
+ switch (op) {
+ case MLXSW_SP_FIB_ENTRY_OP_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
+ break;
+ case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- p_dip = (u32 *) fib_entry->fib_node->key.addr;
- mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
+ virtual_router, prefix_len, (u32 *) addr);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
+ virtual_router, prefix_len, addr);
break;
}
}
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
+ trap_id, adjacency_index, ecmp_size);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
+ trap_id, local_erif);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 tunnel_ptr)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
+}
+
+static int
+mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ bool *postponed_for_bulk)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ op_ctx_basic->ralue_pl);
+}
+
+static bool
+mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ return true;
+}
+
+static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_sp_fib_entry_op op)
+{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+
+ mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
+ fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr,
+ fib_entry->priv);
+}
+
+int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ const struct mlxsw_sp_router_ll_ops *ll_ops)
+{
+ bool postponed_for_bulk = false;
+ int err;
+
+ err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
+ if (!postponed_for_bulk)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ return err;
+}
+
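The basic implementation above writes one RALUE register per entry and never postpones. For contrast, here is a minimal sketch of how a bulking-capable fib_entry_commit (such as the XM variant this abstraction prepares for) could honor the contract. The XMDR register, the record counter and the cap of 4 records are assumptions made for illustration; only the callback signature and the bulk_ok/postponed_for_bulk semantics come from this patch.

/* Illustrative sketch only; not part of this patch. A commit callback
 * that bulks several packed FIB entries into a single register write.
 */
struct mlxsw_sp_fib_entry_op_ctx_xm {		/* hypothetical layout */
	char xmdr_pl[MLXSW_REG_XMDR_LEN];	/* assumed multi-record register */
	unsigned int num_rec;
};

static int
mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       bool *postponed_for_bulk)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;

	/* If the dispatcher allows bulking and another record still fits,
	 * keep the packed entry (and its priv reference) in the context
	 * and tell the caller that the write was postponed.
	 */
	if (op_ctx->bulk_ok && postponed_for_bulk &&
	    op_ctx_xm->num_rec < 4 /* assumed cap */) {
		*postponed_for_bulk = true;
		return 0;
	}

	op_ctx_xm->num_rec = 0;
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr),
			       op_ctx_xm->xmdr_pl);
}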
static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
{
enum mlxsw_reg_ratr_trap_action trap_action;
@@ -4338,11 +4573,13 @@ err_ratr_write:
}
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -4355,12 +4592,11 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
*/
if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- adjacency_index = fib_entry->nh_group->adj_index;
- ecmp_size = fib_entry->nh_group->ecmp_size;
- } else if (!nh_group->adj_index_valid && nh_group->count &&
- nh_group->nh_rif) {
+ adjacency_index = nhgi->adj_index;
+ ecmp_size = nhgi->ecmp_size;
+ } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
err = mlxsw_sp_adj_discard_write(mlxsw_sp,
- nh_group->nh_rif->rif_index);
+ nhgi->nh_rif->rif_index);
if (err)
return err;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
@@ -4371,19 +4607,20 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -4395,58 +4632,62 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
- rif_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_pack(op_ctx);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
@@ -4454,52 +4695,53 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
- fib_entry->decap.tunnel_index);
+ return ipip_ops->fib_entry_op(mlxsw_sp, ll_ops, op_ctx, ipip_entry, op,
+ fib_entry->decap.tunnel_index, fib_entry->priv);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
- fib_entry->decap.tunnel_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
- op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
- fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
if (err)
return err;
@@ -4509,18 +4751,35 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
+static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ bool is_new)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
+ is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
+ MLXSW_SP_FIB_ENTRY_OP_UPDATE);
+}
+
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
- MLXSW_REG_RALUE_OP_WRITE_WRITE);
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
+
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
- MLXSW_REG_RALUE_OP_WRITE_DELETE);
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+
+ if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
+ return 0;
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
+ MLXSW_SP_FIB_ENTRY_OP_DELETE);
}
static int
@@ -4528,17 +4787,17 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
struct mlxsw_sp_router *router = mlxsw_sp->router;
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
+ int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct fib_info *fi = fen_info->fi;
switch (fen_info->type) {
case RTN_LOCAL:
- ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4, dip);
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV4, dip);
if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
@@ -4571,7 +4830,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
return 0;
case RTN_UNICAST:
- if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
+ if (nhgi->gateway)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
@@ -4608,15 +4867,22 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
- if (err)
- goto err_fib4_entry_type_set;
+ fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
+ if (IS_ERR(fib_entry->priv)) {
+ err = PTR_ERR(fib_entry->priv);
+ goto err_fib_entry_priv_create;
+ }
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
- fib4_entry->prio = fen_info->fi->fib_priority;
+ err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
+ if (err)
+ goto err_fib4_entry_type_set;
+
+ fib4_entry->fi = fen_info->fi;
+ fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
fib4_entry->tos = fen_info->tos;
@@ -4625,9 +4891,11 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib4_entry;
-err_nexthop4_group_get:
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+err_nexthop4_group_get:
+ mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
+err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -4635,8 +4903,10 @@ err_fib4_entry_type_set:
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib4_entry *fib4_entry)
{
- mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ fib_info_put(fib4_entry->fi);
mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -4665,8 +4935,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (fib4_entry->tb_id == fen_info->tb_id &&
fib4_entry->tos == fen_info->tos &&
fib4_entry->type == fen_info->type &&
- mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
- fen_info->fi)
+ fib4_entry->fi == fen_info->fi)
return fib4_entry;
return NULL;
@@ -4875,14 +5144,16 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
if (err)
goto err_fib_entry_update;
@@ -4893,14 +5164,25 @@ err_fib_entry_update:
return err;
}
-static void
-mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ int err;
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
fib_node->fib_entry = NULL;
+ return err;
+}
+
+static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
+
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -4922,6 +5204,7 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -4955,7 +5238,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -4980,23 +5263,26 @@ err_fib4_entry_create:
return err;
}
-static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
+ int err;
if (mlxsw_sp->router->aborted)
- return;
+ return 0;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return;
+ return 0;
fib_node = fib4_entry->common.fib_node;
- mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
+ err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -5047,7 +5333,8 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
- fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ if (!mlxsw_sp_rt6->rt->nh)
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
kfree(mlxsw_sp_rt6);
}
@@ -5081,51 +5368,6 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
-static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
- struct mlxsw_sp_nexthop *nh,
- const struct fib6_info *rt)
-{
- const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh->fib_nh_dev;
- struct mlxsw_sp_rif *rif;
- int err;
-
- ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
- if (ipip_entry) {
- ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- if (ipip_ops->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV6)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
- return 0;
- }
- }
-
- nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- return 0;
- mlxsw_sp_nexthop_rif_init(nh, rif);
-
- err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
- if (err)
- goto err_nexthop_neigh_init;
-
- return 0;
-
-err_nexthop_neigh_init:
- mlxsw_sp_nexthop_rif_fini(nh);
- return err;
-}
-
-static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
-{
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
-}
-
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh,
@@ -5133,9 +5375,12 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
- nh->nh_grp = nh_grp;
+ nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5144,13 +5389,13 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
+ return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
}
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
@@ -5162,51 +5407,98 @@ static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
-static struct mlxsw_sp_nexthop_group *
-mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
struct mlxsw_sp_nexthop *nh;
- int i = 0;
- int err;
+ int err, i;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
- GFP_KERNEL);
- if (!nh_grp)
- return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&nh_grp->fib_list);
-#if IS_ENABLED(CONFIG_IPV6)
- nh_grp->neigh_tbl = &nd_tbl;
-#endif
+ nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
+ GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
- nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
- nh_grp->count = fib6_entry->nrt6;
- for (i = 0; i < nh_grp->count; i++) {
+ nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
+ nhgi->count = fib6_entry->nrt6;
+ for (i = 0; i < nhgi->count; i++) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- nh = &nh_grp->nexthops[i];
+ nh = &nhgi->nexthops[i];
err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
if (err)
goto err_nexthop6_init;
mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
}
+ nh_grp->nhgi = nhgi;
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop6_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
+
+ err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
+ if (err)
+ goto err_nexthop_group_info_init;
err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
if (err)
goto err_nexthop_group_insert;
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ nh_grp->can_destroy = true;
+
return nh_grp;
err_nexthop_group_insert:
-err_nexthop6_init:
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
- }
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
kfree(nh_grp);
return ERR_PTR(err);
}
@@ -5215,16 +5507,10 @@ static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- struct mlxsw_sp_nexthop *nh;
- int i = nh_grp->count;
-
+ if (!nh_grp->can_destroy)
+ return;
mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
- }
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
- WARN_ON(nh_grp->adj_index_valid);
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
kfree(nh_grp);
}
@@ -5240,15 +5526,15 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(nh_grp);
}
- list_add_tail(&fib6_entry->common.nexthop_group_node,
- &nh_grp->fib_list);
- fib6_entry->common.nh_group = nh_grp;
-
/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
*/
__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+ list_add_tail(&fib6_entry->common.nexthop_group_node,
+ &nh_grp->fib_list);
+ fib6_entry->common.nh_group = nh_grp;
+
return 0;
}
@@ -5263,9 +5549,9 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int
-mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
int err;
@@ -5281,7 +5567,8 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
+ err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
+ &fib6_entry->common, false);
if (err)
goto err_fib_entry_update;
@@ -5301,6 +5588,7 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -5318,7 +5606,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
if (err)
goto err_nexthop6_group_update;
@@ -5339,6 +5627,7 @@ err_rt6_create:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -5356,26 +5645,20 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
const struct fib6_info *rt)
{
- /* Packets hitting RTF_REJECT routes need to be discarded by the
- * stack. We can rely on their destination device not having a
- * RIF (it's the loopback device) and can thus use action type
- * local, which will cause them to be trapped with a lower
- * priority than packets that need to be locally received.
- */
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
- else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
+ else if (fib_entry->nh_group->nhgi->gateway)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
@@ -5409,6 +5692,12 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
+ fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
+ if (IS_ERR(fib_entry->priv)) {
+ err = PTR_ERR(fib_entry->priv);
+ goto err_fib_entry_priv_create;
+ }
+
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
@@ -5421,12 +5710,12 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
-
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
goto err_nexthop6_group_get;
+ mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+
fib_entry->fib_node = fib_node;
return fib6_entry;
@@ -5441,6 +5730,8 @@ err_rt6_create:
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
+ mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
+err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -5451,6 +5742,7 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
+ mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -5508,8 +5800,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -5548,7 +5840,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -5572,8 +5864,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5604,8 +5896,7 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
- nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -5616,19 +5907,20 @@ err_fib6_entry_nexthop_add:
return err;
}
-static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
+ int err;
if (mlxsw_sp->router->aborted)
- return;
+ return 0;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return;
+ return 0;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -5637,58 +5929,66 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return;
+ return 0;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
- nrt6);
- return;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ return 0;
}
fib_node = fib6_entry->common.fib_node;
- mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
+ err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
}
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_reg_ralxx_protocol proto,
+ enum mlxsw_sp_l3proto proto,
u8 tree_id)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
+ enum mlxsw_reg_ralxx_protocol ralxx_proto =
+ (enum mlxsw_reg_ralxx_protocol) proto;
+ struct mlxsw_sp_fib_entry_priv *priv;
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
int i, err;
- mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
+ err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
if (err)
return err;
- mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
+ err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
if (err)
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- raltb_pl);
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
+ err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
if (err)
return err;
- mlxsw_reg_ralue_pack(ralue_pl, proto,
- MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- ralue_pl);
+ priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
+ vr->id, 0, NULL, priv);
+ ll_ops->fib_entry_act_ip2me_pack(op_ctx);
+ err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
+ mlxsw_sp_fib_entry_priv_put(priv);
if (err)
return err;
}
@@ -5784,7 +6084,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
- enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
+ enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
int err;
err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
@@ -5796,7 +6096,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
* packets that don't match any routes are trapped to the CPU.
*/
- proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
+ proto = MLXSW_SP_L3_PROTO_IPV6;
return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
MLXSW_SP_LPM_TREE_MIN + 1);
}
@@ -5901,15 +6201,15 @@ static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
-struct mlxsw_sp_fib6_event_work {
+struct mlxsw_sp_fib6_event {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event_work {
- struct work_struct work;
+struct mlxsw_sp_fib_event {
+ struct list_head list; /* node in fib queue */
union {
- struct mlxsw_sp_fib6_event_work fib6_work;
+ struct mlxsw_sp_fib6_event fib6_event;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -5918,11 +6218,12 @@ struct mlxsw_sp_fib_event_work {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
+ int family;
};
static int
-mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -5936,8 +6237,8 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
if (!rt_arr)
return -ENOMEM;
- fib6_work->rt_arr = rt_arr;
- fib6_work->nrt6 = nrt6;
+ fib6_event->rt_arr = rt_arr;
+ fib6_event->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -5959,170 +6260,232 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
}
static void
-mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
+mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
{
int i;
- for (i = 0; i < fib6_work->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
- kfree(fib6_work->rt_arr);
+ for (i = 0; i < fib6_event->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
+ kfree(fib6_event->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
- &fib_work->fen_info);
- if (err)
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_info_put(fib_work->fen_info.fi);
+ }
+ fib_info_put(fib_event->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
- fib_info_put(fib_work->fen_info.fi);
+ err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ if (err)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ fib_info_put(fib_event->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
- fib_work->fnh_info.fib_nh);
- fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
+ fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
- kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- if (err)
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ }
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- if (err)
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ }
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib6_del(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
- kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
- replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
- mr_cache_put(fib_work->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
+ mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_work->ven_info);
+ &fib_event->ven_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- dev_put(fib_work->ven_info.dev);
+ dev_put(fib_event->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
- &fib_work->ven_info);
- dev_put(fib_work->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
+ dev_put(fib_event->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
- kfree(fib_work);
}
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
+ struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
+ struct mlxsw_sp_fib_event *next_fib_event;
+ struct mlxsw_sp_fib_event *fib_event;
+ int last_family = AF_UNSPEC;
+ LIST_HEAD(fib_event_queue);
+
+ spin_lock_bh(&router->fib_event_queue_lock);
+ list_splice_init(&router->fib_event_queue, &fib_event_queue);
+ spin_unlock_bh(&router->fib_event_queue_lock);
+
+	/* The router lock is held here to make sure the per-instance
+	 * operation context is not used by anything else while FIB4/6
+	 * events are being processed.
+	 */
+ mutex_lock(&router->lock);
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ list_for_each_entry_safe(fib_event, next_fib_event,
+ &fib_event_queue, list) {
+	/* Check if the next entry in the queue exists and is
+	 * of the same type (family and event) as the current one.
+ * In that case it is permitted to do the bulking
+ * of multiple FIB entries to a single register write.
+ */
+ op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
+ fib_event->family == next_fib_event->family &&
+ fib_event->event == next_fib_event->event;
+
+ /* In case family of this and the previous entry are different, context
+ * reinitialization is going to be needed now, indicate that.
+ * Note that since last_family is initialized to AF_UNSPEC, this is always
+ * going to happen for the first entry processed in the work.
+ */
+ if (fib_event->family != last_family)
+ op_ctx->initialized = false;
+
+ switch (fib_event->family) {
+ case AF_INET:
+ mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
+ fib_event);
+ break;
+ case AF_INET6:
+ mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
+ fib_event);
+ break;
+ case RTNL_FAMILY_IP6MR:
+ case RTNL_FAMILY_IPMR:
+	/* Unlock here, as the FIBMR handler takes the lock
+	 * again under RTNL. The per-instance operation context
+	 * is not used by FIBMR.
+	 */
+ mutex_unlock(&router->lock);
+ mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
+ fib_event);
+ mutex_lock(&router->lock);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ last_family = fib_event->family;
+ kfree(fib_event);
+ cond_resched();
+ }
+ WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
+ mutex_unlock(&router->lock);
+}
+
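To make the two per-event flags concrete, here is a worked trace (illustrative only, derived from the logic above) for one spliced queue:

/* Worked example for a spliced queue of four events:
 *   [0] AF_INET  ENTRY_REPLACE  bulk_ok = true   (same family/event as [1])
 *   [1] AF_INET  ENTRY_REPLACE  bulk_ok = false  ([2] differs in event)
 *   [2] AF_INET  ENTRY_DEL      bulk_ok = false  ([3] differs in family)
 *   [3] AF_INET6 ENTRY_REPLACE  bulk_ok = false  (last entry in the queue)
 * op_ctx->initialized is cleared before [0], because last_family starts
 * at AF_UNSPEC, and again before [3], where the family changes.
 */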
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_work->fen_info = *fen_info;
+ fib_event->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while work is queued. Release it afterwards.
+	 * freed while the event is queued. Release it afterwards.
*/
- fib_info_hold(fib_work->fen_info.fi);
+ fib_info_hold(fib_event->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_work->fnh_info = *fnh_info;
- fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+ fib_event->fnh_info = *fnh_info;
+ fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
- fen6_info);
+ err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
+ fen6_info);
if (err)
return err;
break;
@@ -6132,20 +6495,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
- mr_cache_hold(fib_work->men_info.mfc);
+ memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
+ mr_cache_hold(fib_event->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
- dev_hold(fib_work->ven_info.dev);
+ memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
+ dev_hold(fib_event->ven_info.dev);
break;
}
}
@@ -6202,7 +6565,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event_work *fib_work;
+ struct mlxsw_sp_fib_event *fib_event;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -6252,37 +6615,39 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
}
- fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (!fib_work)
+ fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
+ if (!fib_event)
return NOTIFY_BAD;
- fib_work->mlxsw_sp = router->mlxsw_sp;
- fib_work->event = event;
+ fib_event->mlxsw_sp = router->mlxsw_sp;
+ fib_event->event = event;
+ fib_event->family = info->family;
switch (info->family) {
case AF_INET:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
- mlxsw_sp_router_fib4_event(fib_work, info);
+ mlxsw_sp_router_fib4_event(fib_event, info);
break;
case AF_INET6:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
- err = mlxsw_sp_router_fib6_event(fib_work, info);
+ err = mlxsw_sp_router_fib6_event(fib_event, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
- mlxsw_sp_router_fibmr_event(fib_work, info);
+ mlxsw_sp_router_fibmr_event(fib_event, info);
break;
}
- mlxsw_core_schedule_work(&fib_work->work);
+ /* Enqueue the event and trigger the work */
+ spin_lock_bh(&router->fib_event_queue_lock);
+ list_add_tail(&fib_event->list, &router->fib_event_queue);
+ spin_unlock_bh(&router->fib_event_queue_lock);
+ mlxsw_core_schedule_work(&router->fib_event_work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_work);
+ kfree(fib_event);
return NOTIFY_BAD;
}
@@ -8057,6 +8422,45 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
+static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
+ .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
+ .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
+ .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
+ .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
+ .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
+ .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
+ .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
+ .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
+ .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
+ .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
+ .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
+};
+
+static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
+{
+ size_t max_size = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
+ size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
+
+ if (size > max_size)
+ max_size = size;
+ }
+ router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
+ GFP_KERNEL);
+ if (!router->ll_op_ctx)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
+ return 0;
+}
+
+static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
+{
+ WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
+ kfree(router->ll_op_ctx);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
@@ -8070,6 +8474,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
+
+ err = mlxsw_sp_router_ll_op_ctx_init(router);
+ if (err)
+ goto err_ll_op_ctx_init;
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8118,6 +8529,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
+ INIT_LIST_HEAD(&router->fib_event_queue);
+ spin_lock_init(&router->fib_event_queue_lock);
+
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -8151,6 +8566,7 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
+ WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -8171,6 +8587,8 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_router_ll_op_ctx_fini(router);
+err_ll_op_ctx_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8184,6 +8602,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
+ WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8193,6 +8612,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 8418dc3ae967..8230f6ff02ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,6 +15,26 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
+struct mlxsw_sp_fib_entry_op_ctx {
+	u8 bulk_ok:1, /* Indicates to the low-level op that it is OK to
+	 * bulk this entry together with the next one in
+	 * the queue.
+	 */
+	   initialized:1; /* Bit that the low-level op sets once the
+	 * context priv has been initialized.
+	 */
+ struct list_head fib_entry_priv_list;
+ unsigned long ll_priv[];
+};
+
+static inline void
+mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
+
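To make the lifecycle concrete, here is a minimal, hypothetical caller following the contract: clear the context when starting a fresh operation sequence, pack the entry and an action through the low-level ops, then commit. example_trap_route_write is invented for illustration and assumes the static helpers of spectrum_router.c are in scope; the real callers live in that file.

/* Illustrative sketch only: one trap route write using the context. */
static int example_trap_route_write(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry *fib_entry)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
		fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;

	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);	/* fresh sequence */
	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, MLXSW_SP_FIB_ENTRY_OP_WRITE);
	ll_ops->fib_entry_act_ip2me_pack(op_ctx);	/* trap to CPU */
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}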
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
@@ -48,8 +68,58 @@ struct mlxsw_sp_router {
bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
+ struct work_struct fib_event_work;
+ struct list_head fib_event_queue;
+ spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
+ /* One set of ops for each protocol: IPv4 and IPv6 */
+ const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
+ struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
+};
+
+struct mlxsw_sp_fib_entry_priv {
+ refcount_t refcnt;
+ struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
+ unsigned long priv[];
+};
+
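The create/hold/put helpers for this priv, used throughout spectrum_router.c, are defined there and not visible in this hunk. A plausible sketch of their shape, assuming the usual refcount_t idiom and sizing by fib_entry_priv_size:

/* Plausible sketch of the priv lifetime helpers; details may differ
 * from the actual definitions in spectrum_router.c.
 */
static struct mlxsw_sp_fib_entry_priv *
example_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size,
		       GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	return priv;
}

static void example_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (priv && refcount_dec_and_test(&priv->refcnt))
		kfree(priv);
}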
+enum mlxsw_sp_fib_entry_op {
+ MLXSW_SP_FIB_ENTRY_OP_WRITE,
+ MLXSW_SP_FIB_ENTRY_OP_UPDATE,
+ MLXSW_SP_FIB_ENTRY_OP_DELETE,
};
+/* Low-level router ops. These handle the different register sets
+ * used to work with ordinary and XM trees and FIB entries.
+ */
+struct mlxsw_sp_router_ll_ops {
+ int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
+ int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
+ int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
+ size_t fib_entry_op_ctx_size;
+ size_t fib_entry_priv_size;
+ void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
+ u16 virtual_router, u8 prefix_len, unsigned char *addr,
+ struct mlxsw_sp_fib_entry_priv *priv);
+ void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size);
+ void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif);
+ void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
+ void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 tunnel_ptr);
+ int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ bool *postponed_for_bulk);
+ bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
+};
+
+int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ const struct mlxsw_sp_router_ll_ops *ll_ops);
+
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index dcde496da7fb..c5de8f46cdd3 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- phy_ethtool_get_wol(netdev->phydev, wol);
+
+ if (netdev->phydev)
+ phy_ethtool_get_wol(netdev->phydev, wol);
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
@@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- phy_ethtool_set_wol(netdev->phydev, wol);
-
- return 0;
+ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+ : -ENETDOWN;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index b319c22c211c..794510cba51f 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -830,14 +830,13 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
static int lan743x_mac_open(struct lan743x_adapter *adapter)
{
- int ret = 0;
u32 temp;
temp = lan743x_csr_read(adapter, MAC_RX);
lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
temp = lan743x_csr_read(adapter, MAC_TX);
lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
- return ret;
+ return 0;
}
static void lan743x_mac_close(struct lan743x_adapter *adapter)
@@ -958,7 +957,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data = lan743x_csr_read(adapter, MAC_CR);
/* set interface mode */
- if (phy_interface_mode_is_rgmii(adapter->phy_mode))
+ if (phy_interface_is_rgmii(phydev))
/* RGMII */
data &= ~MAC_CR_MII_EN_;
else
@@ -1014,33 +1013,14 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct lan743x_phy *phy = &adapter->phy;
- struct phy_device *phydev = NULL;
- struct device_node *phynode;
- struct net_device *netdev;
+ struct phy_device *phydev;
int ret = -EIO;
- netdev = adapter->netdev;
- phynode = of_node_get(adapter->pdev->dev.of_node);
-
- if (phynode) {
- /* try devicetree phy, or fixed link */
- of_get_phy_mode(phynode, &adapter->phy_mode);
-
- if (of_phy_is_fixed_link(phynode)) {
- ret = of_phy_register_fixed_link(phynode);
- if (ret) {
- netdev_err(netdev,
- "cannot register fixed PHY\n");
- of_node_put(phynode);
- goto return_error;
- }
- }
- phydev = of_phy_connect(netdev, phynode,
- lan743x_phy_link_status_change, 0,
- adapter->phy_mode);
- of_node_put(phynode);
- }
+ /* try devicetree phy, or fixed link */
+ phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
+ lan743x_phy_link_status_change);
if (!phydev) {
/* try internal phy */
@@ -1048,10 +1028,9 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
if (!phydev)
goto return_error;
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
ret = phy_connect_direct(netdev, phydev,
lan743x_phy_link_status_change,
- adapter->phy_mode);
+ PHY_INTERFACE_MODE_GMII);
if (ret)
goto return_error;
}
@@ -1066,6 +1045,7 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
phy_start(phydev);
phy_start_aneg(phydev);
+ phy_attached_info(phydev);
return 0;
return_error:
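
For reference, of_phy_get_and_connect() (declared in <linux/of_mdio.h>)
folds the removed open-coded sequence - phy-mode lookup, fixed-link
registration, of_phy_connect() - into a single call. A minimal sketch of
the consolidated pattern, with my_link_change() and my_phy_open() as
stand-in names:

#include <linux/of_mdio.h>
#include <linux/phy.h>

static void my_link_change(struct net_device *netdev)
{
	/* reconfigure the MAC for the new PHY state */
}

static int my_phy_open(struct net_device *netdev, struct device_node *np)
{
	struct phy_device *phydev;

	/* Reads "phy-mode", registers a fixed link if the node
	 * describes one, then connects; returns NULL on failure.
	 */
	phydev = of_phy_get_and_connect(netdev, np, my_link_change);
	if (!phydev)
		return -ENODEV;

	phy_start(phydev);
	return 0;
}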
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index a536f4a4994d..3a0e70daa88f 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -703,7 +703,6 @@ struct lan743x_rx {
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
- phy_interface_t phy_mode;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 70bf8c67d7ef..2632fe2d2448 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -147,42 +147,20 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- u16 vid)
+static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ struct ocelot_vlan native_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val = 0;
- if (ocelot_port->vid != vid) {
- /* Always permit deleting the native VLAN (vid = 0) */
- if (ocelot_port->vid && vid) {
- dev_err(ocelot->dev,
- "Port already has a native VLAN: %d\n",
- ocelot_port->vid);
- return -EBUSY;
- }
- ocelot_port->vid = vid;
- }
+ ocelot_port->native_vlan = native_vlan;
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
REW_PORT_VLAN_CFG_PORT_VID_M,
REW_PORT_VLAN_CFG, port);
- if (ocelot_port->vlan_aware && !ocelot_port->vid)
- /* If port is vlan-aware and tagged, drop untagged and priority
- * tagged frames.
- */
- val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
- ocelot_rmw_gix(ocelot, val,
- ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
- ANA_PORT_DROP_CFG, port);
-
if (ocelot_port->vlan_aware) {
- if (ocelot_port->vid)
+ if (native_vlan.valid)
/* Tag all frames except when VID == DEFAULT_VLAN */
val = REW_TAG_CFG_TAG_CFG(1);
else
@@ -195,8 +173,38 @@ static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
ocelot_rmw_gix(ocelot, val,
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+}
- return 0;
+/* Default VLAN to classify untagged frames to (may be zero) */
+static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+ struct ocelot_vlan pvid_vlan)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val = 0;
+
+ ocelot_port->pvid_vlan = pvid_vlan;
+
+ if (!ocelot_port->vlan_aware)
+ pvid_vlan.vid = 0;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
+ ANA_PORT_VLAN_CFG_VLAN_VID_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ /* If there's no pvid, we should drop not only untagged traffic (which
+ * happens automatically), but also 802.1p traffic, which gets
+ * classified to VLAN 0. VLAN 0 is always in our RX filter, though, so
+ * such traffic would be accepted were it not for this setting.
+ */
+ if (!pvid_vlan.valid && ocelot_port->vlan_aware)
+ val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
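
The pvid and native VLAN settings are now carried as a (valid, vid) pair
instead of overloading vid == 0 to mean "none". Judging by the usage
above, the new type in include/soc/mscc/ocelot.h is presumably along
these lines:

struct ocelot_vlan {
	bool valid;
	u16 vid;
};

Deleting a pvid or native VLAN then reduces to storing an all-zero
value, e.g. struct ocelot_vlan pvid = {0} (.valid = false), which is
exactly what ocelot_vlan_del() and ocelot_port_bridge_leave() do below.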
@@ -233,24 +241,30 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
- ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
+ ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
return 0;
}
EXPORT_SYMBOL(ocelot_port_vlan_filtering);
-/* Default vlan to clasify for untagged frames (may be zero) */
-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
+int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- ocelot_rmw_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
- ANA_PORT_VLAN_CFG_VLAN_VID_M,
- ANA_PORT_VLAN_CFG, port);
+ /* Deny changing the native VLAN, but always permit deleting it */
+ if (untagged && ocelot_port->native_vlan.vid != vid &&
+ ocelot_port->native_vlan.valid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->native_vlan.vid);
+ return -EBUSY;
+ }
- ocelot_port->pvid = pvid;
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
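
Splitting validation out into ocelot_vlan_prepare() follows the
two-phase switchdev object model: the prepare phase may fail, the commit
phase may not. The caller-side shape, as in the ocelot_net.c hunk
further down:

	if (switchdev_trans_ph_prepare(trans))
		ret = ocelot_vlan_vid_prepare(dev, vid, pvid, untagged);
	else
		ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);

so the -EBUSY for a conflicting native VLAN is now reported before any
hardware state is touched.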
@@ -264,14 +278,21 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
return ret;
/* Default ingress vlan classification */
- if (pvid)
- ocelot_port_set_pvid(ocelot, port, vid);
+ if (pvid) {
+ struct ocelot_vlan pvid_vlan;
+
+ pvid_vlan.vid = vid;
+ pvid_vlan.valid = true;
+ ocelot_port_set_pvid(ocelot, port, pvid_vlan);
+ }
/* Untagged egress VLAN classification */
if (untagged) {
- ret = ocelot_port_set_native_vlan(ocelot, port, vid);
- if (ret)
- return ret;
+ struct ocelot_vlan native_vlan;
+
+ native_vlan.vid = vid;
+ native_vlan.valid = true;
+ ocelot_port_set_native_vlan(ocelot, port, native_vlan);
}
return 0;
@@ -290,12 +311,18 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return ret;
/* Ingress */
- if (ocelot_port->pvid == vid)
- ocelot_port_set_pvid(ocelot, port, 0);
+ if (ocelot_port->pvid_vlan.vid == vid) {
+ struct ocelot_vlan pvid_vlan = {0};
+
+ ocelot_port_set_pvid(ocelot, port, pvid_vlan);
+ }
/* Egress */
- if (ocelot_port->vid == vid)
- ocelot_port_set_native_vlan(ocelot, port, 0);
+ if (ocelot_port->native_vlan.vid == vid) {
+ struct ocelot_vlan native_vlan = {0};
+
+ ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+ }
return 0;
}
@@ -542,26 +569,11 @@ EXPORT_SYMBOL(ocelot_get_txtstamp);
int ocelot_fdb_add(struct ocelot *ocelot, int port,
const unsigned char *addr, u16 vid)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
int pgid = port;
if (port == ocelot->npi)
pgid = PGID_CPU;
- if (!vid) {
- if (!ocelot_port->vlan_aware)
- /* If the bridge is not VLAN aware and no VID was
- * provided, set it to pvid to ensure the MAC entry
- * matches incoming untagged packets
- */
- vid = ocelot_port->pvid;
- else
- /* If the bridge is VLAN aware a VID must be provided as
- * otherwise the learnt entry wouldn't match any frame.
- */
- return -EINVAL;
- }
-
return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
@@ -958,52 +970,88 @@ static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
return ENTRYTYPE_MACv4;
if (addr[0] == 0x33 && addr[1] == 0x33)
return ENTRYTYPE_MACv6;
- return ENTRYTYPE_NORMAL;
+ return ENTRYTYPE_LOCKED;
+}
+
+static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
+ unsigned long ports)
+{
+ struct ocelot_pgid *pgid;
+
+ pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
+ if (!pgid)
+ return ERR_PTR(-ENOMEM);
+
+ pgid->ports = ports;
+ pgid->index = index;
+ refcount_set(&pgid->refcount, 1);
+ list_add_tail(&pgid->list, &ocelot->pgids);
+
+ return pgid;
+}
+
+static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
+{
+ if (!refcount_dec_and_test(&pgid->refcount))
+ return;
+
+ list_del(&pgid->list);
+ kfree(pgid);
}
-static int ocelot_mdb_get_pgid(struct ocelot *ocelot,
- enum macaccess_entry_type entry_type)
+static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
+ const struct ocelot_multicast *mc)
{
- int pgid;
+ struct ocelot_pgid *pgid;
+ int index;
/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
* 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
* destination mask table (PGID), the destination set is programmed as
* part of the entry MAC address.", and the DEST_IDX is set to 0.
*/
- if (entry_type == ENTRYTYPE_MACv4 ||
- entry_type == ENTRYTYPE_MACv6)
- return 0;
+ if (mc->entry_type == ENTRYTYPE_MACv4 ||
+ mc->entry_type == ENTRYTYPE_MACv6)
+ return ocelot_pgid_alloc(ocelot, 0, mc->ports);
- for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) {
- struct ocelot_multicast *mc;
+ list_for_each_entry(pgid, &ocelot->pgids, list) {
+ /* When searching for a nonreserved multicast PGID, ignore the
+ * dummy PGID of zero that we have for MACv4/MACv6 entries
+ */
+ if (pgid->index && pgid->ports == mc->ports) {
+ refcount_inc(&pgid->refcount);
+ return pgid;
+ }
+ }
+
+ /* Search for a free index in the nonreserved multicast PGID area */
+ for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
bool used = false;
- list_for_each_entry(mc, &ocelot->multicast, list) {
- if (mc->pgid == pgid) {
+ list_for_each_entry(pgid, &ocelot->pgids, list) {
+ if (pgid->index == index) {
used = true;
break;
}
}
if (!used)
- return pgid;
+ return ocelot_pgid_alloc(ocelot, index, mc->ports);
}
- return -1;
+ return ERR_PTR(-ENOSPC);
}
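
Usage sketch of the refcounting scheme: multicast entries whose
destination port sets coincide share one PGID instead of each consuming
a scarce index.

	/* mc1->ports == mc2->ports, neither is MACv4/MACv6 */
	pgid = ocelot_mdb_get_pgid(ocelot, mc1); /* new PGID, refcount 1 */
	pgid = ocelot_mdb_get_pgid(ocelot, mc2); /* reused,   refcount 2 */

	ocelot_pgid_free(ocelot, pgid);          /* refcount 1, kept  */
	ocelot_pgid_free(ocelot, pgid);          /* refcount 0, freed */

MACv4/MACv6 entries always get a fresh dummy PGID with index 0, since
their destination set is encoded in the MAC address itself rather than
in the PGID table.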
static void ocelot_encode_ports_to_mdb(unsigned char *addr,
- struct ocelot_multicast *mc,
- enum macaccess_entry_type entry_type)
+ struct ocelot_multicast *mc)
{
- memcpy(addr, mc->addr, ETH_ALEN);
+ ether_addr_copy(addr, mc->addr);
- if (entry_type == ENTRYTYPE_MACv4) {
+ if (mc->entry_type == ENTRYTYPE_MACv4) {
addr[0] = 0;
addr[1] = mc->ports >> 8;
addr[2] = mc->ports & 0xff;
- } else if (entry_type == ENTRYTYPE_MACv6) {
+ } else if (mc->entry_type == ENTRYTYPE_MACv6) {
addr[0] = mc->ports >> 8;
addr[1] = mc->ports & 0xff;
}
@@ -1012,80 +1060,78 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr,
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- enum macaccess_entry_type entry_type;
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
+ struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
- bool new = false;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
- if (!vid)
- vid = ocelot_port->pvid;
-
- entry_type = ocelot_classify_mdb(mdb->addr);
-
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
- int pgid = ocelot_mdb_get_pgid(ocelot, entry_type);
-
- if (pgid < 0) {
- dev_err(ocelot->dev,
- "No more PGIDs available for mdb %pM vid %d\n",
- mdb->addr, vid);
- return -ENOSPC;
- }
-
+ /* New entry */
mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
- memcpy(mc->addr, mdb->addr, ETH_ALEN);
+ mc->entry_type = ocelot_classify_mdb(mdb->addr);
+ ether_addr_copy(mc->addr, mdb->addr);
mc->vid = vid;
- mc->pgid = pgid;
list_add_tail(&mc->list, &ocelot->multicast);
- new = true;
- }
-
- if (!new) {
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ } else {
+ /* Existing entry. Clean up the current port mask from
+ * hardware now, because we'll be modifying it.
+ */
+ ocelot_pgid_free(ocelot, mc->pgid);
+ ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
}
mc->ports |= BIT(port);
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
- return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
+ pgid = ocelot_mdb_get_pgid(ocelot, mc);
+ if (IS_ERR(pgid)) {
+ dev_err(ocelot->dev,
+ "Cannot allocate PGID for mdb %pM vid %d\n",
+ mc->addr, mc->vid);
+ devm_kfree(ocelot->dev, mc);
+ return PTR_ERR(pgid);
+ }
+ mc->pgid = pgid;
+
+ ocelot_encode_ports_to_mdb(addr, mc);
+
+ if (mc->entry_type != ENTRYTYPE_MACv4 &&
+ mc->entry_type != ENTRYTYPE_MACv6)
+ ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
+ pgid->index);
+
+ return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
+ mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_add);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- enum macaccess_entry_type entry_type;
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
+ struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
- if (!vid)
- vid = ocelot_port->pvid;
-
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
return -ENOENT;
- entry_type = ocelot_classify_mdb(mdb->addr);
-
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
+ ocelot_pgid_free(ocelot, mc->pgid);
mc->ports &= ~BIT(port);
if (!mc->ports) {
list_del(&mc->list);
@@ -1093,9 +1139,21 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
return 0;
}
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ /* We have a PGID with fewer ports now */
+ pgid = ocelot_mdb_get_pgid(ocelot, mc);
+ if (IS_ERR(pgid))
+ return PTR_ERR(pgid);
+ mc->pgid = pgid;
+
+ ocelot_encode_ports_to_mdb(addr, mc);
+
+ if (mc->entry_type != ENTRYTYPE_MACv4 &&
+ mc->entry_type != ENTRYTYPE_MACv6)
+ ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
+ pgid->index);
- return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
+ return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
+ mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
@@ -1120,6 +1178,7 @@ EXPORT_SYMBOL(ocelot_port_bridge_join);
int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
+ struct ocelot_vlan pvid = {0}, native_vlan = {0};
struct switchdev_trans trans;
int ret;
@@ -1138,8 +1197,10 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
if (ret)
return ret;
- ocelot_port_set_pvid(ocelot, port, 0);
- return ocelot_port_set_native_vlan(ocelot, port, 0);
+ ocelot_port_set_pvid(ocelot, port, pvid);
+ ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -1453,6 +1514,7 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
INIT_LIST_HEAD(&ocelot->multicast);
+ INIT_LIST_HEAD(&ocelot->pgids);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index abb407dff93c..291d39d49c4e 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -41,14 +41,6 @@ struct frame_info {
u32 timestamp; /* rew_val */
};
-struct ocelot_multicast {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 vid;
- u16 ports;
- int pgid;
-};
-
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
@@ -87,6 +79,29 @@ enum macaccess_entry_type {
ENTRYTYPE_MACv6,
};
+/* A PGID (port group ID) port mask structure, encoding the 2^ocelot->num_phys_ports
+ * possibilities of egress port masks for L2 multicast traffic.
+ * For a switch with 9 user ports, there are 512 possible port masks, but the
+ * hardware only has 46 individual PGIDs that it can forward multicast traffic
+ * to. So we need a structure that maps the limited PGID indices to the port
+ * destinations requested by the user for L2 multicast.
+ */
+struct ocelot_pgid {
+ unsigned long ports;
+ int index;
+ refcount_t refcount;
+ struct list_head list;
+};
+
+struct ocelot_multicast {
+ struct list_head list;
+ enum macaccess_entry_type entry_type;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct ocelot_pgid *pgid;
+};
+
int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data);
int ocelot_mact_learn(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index b34da11acf65..c65ae6f75a16 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -206,6 +206,17 @@ static void ocelot_port_adjust_link(struct net_device *dev)
ocelot_adjust_link(ocelot, port, dev->phydev);
}
+static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged);
+}
+
static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
bool untagged)
{
@@ -409,7 +420,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
+ return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid_vlan.vid);
}
static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
@@ -418,8 +429,8 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
- ENTRYTYPE_LOCKED);
+ return ocelot_mact_learn(ocelot, PGID_CPU, addr,
+ ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
}
static void ocelot_set_rx_mode(struct net_device *dev)
@@ -462,10 +473,10 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
const struct sockaddr *addr = p;
/* Learn the new net device MAC address in the mac table. */
- ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
- ENTRYTYPE_LOCKED);
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
+ ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid);
ether_addr_copy(dev->dev_addr, addr->sa_data);
return 0;
@@ -812,9 +823,14 @@ static int ocelot_port_obj_add_vlan(struct net_device *dev,
u16 vid;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ret = ocelot_vlan_vid_add(dev, vid,
- vlan->flags & BRIDGE_VLAN_INFO_PVID,
- vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (switchdev_trans_ph_prepare(trans))
+ ret = ocelot_vlan_vid_prepare(dev, vid, pvid,
+ untagged);
+ else
+ ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);
if (ret)
return ret;
}
@@ -1074,8 +1090,8 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
- ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
- ENTRYTYPE_LOCKED);
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
+ ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index d13d92bf7447..8f2f091bce89 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1106,7 +1106,7 @@ static int s2io_print_pci_mode(struct s2io_nic *nic)
* '-1' on failure
*/
-static int init_tti(struct s2io_nic *nic, int link)
+static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
@@ -1166,7 +1166,7 @@ static int init_tti(struct s2io_nic *nic, int link)
if (wait_for_cmd_complete(&bar0->tti_command_mem,
TTI_CMD_MEM_STROBE_NEW_CMD,
- S2IO_BIT_RESET) != SUCCESS)
+ S2IO_BIT_RESET, may_sleep) != SUCCESS)
return FAILURE;
}
@@ -1659,7 +1659,7 @@ static int init_nic(struct s2io_nic *nic)
*/
/* Initialize TTI */
- if (SUCCESS != init_tti(nic, nic->last_link_state))
+ if (SUCCESS != init_tti(nic, nic->last_link_state, true))
return -ENODEV;
/* RTI Initialization */
@@ -3331,7 +3331,7 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
*/
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state)
+ int bit_state, bool may_sleep)
{
int ret = FAILURE, cnt = 0, delay = 1;
u64 val64;
@@ -3353,7 +3353,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
}
}
- if (in_interrupt())
+ if (!may_sleep)
mdelay(delay);
else
msleep(delay);
@@ -4877,8 +4877,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
* Return value:
* void.
*/
-
-static void s2io_set_multicast(struct net_device *dev)
+static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
{
int i, j, prev_cnt;
struct netdev_hw_addr *ha;
@@ -4903,7 +4902,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, may_sleep);
sp->m_cast_flg = 1;
sp->all_multi_pos = config->max_mc_addr - 1;
@@ -4920,7 +4919,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, may_sleep);
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
@@ -5000,7 +4999,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, may_sleep)) {
DBG_PRINT(ERR_DBG,
"%s: Adding Multicasts failed\n",
dev->name);
@@ -5030,7 +5029,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, may_sleep)) {
DBG_PRINT(ERR_DBG,
"%s: Adding Multicasts failed\n",
dev->name);
@@ -5041,6 +5040,12 @@ static void s2io_set_multicast(struct net_device *dev)
}
}
+/* NDO wrapper for s2io_set_multicast */
+static void s2io_ndo_set_multicast(struct net_device *dev)
+{
+ s2io_set_multicast(dev, false);
+}
+
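
The explicit may_sleep flag replaces the in_interrupt() test at the
delay site: a callee cannot reliably deduce whether it may sleep, so the
decision moves to the callers, which know their context.
.ndo_set_rx_mode is invoked with the netdev address-list lock held (a BH
spinlock), hence the wrapper hard-codes false, while probe/up paths pass
true:

	s2io_card_up()				/* process context */
	  -> s2io_set_multicast(dev, true);	/* may msleep()    */

	.ndo_set_rx_mode			/* addr_list_lock held, BH */
	  -> s2io_ndo_set_multicast(dev)
	       -> s2io_set_multicast(dev, false); /* must mdelay() */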
/* read from CAM unicast & multicast addresses and store it in
* def_mac_addr structure
*/
@@ -5127,7 +5132,7 @@ static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, true)) {
DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
return FAILURE;
}
@@ -5171,7 +5176,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, true)) {
DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
return FAILURE;
}
@@ -7141,7 +7146,7 @@ static int s2io_card_up(struct s2io_nic *sp)
}
/* Setting its receive mode */
- s2io_set_multicast(dev);
+ s2io_set_multicast(dev, true);
if (dev->features & NETIF_F_LRO) {
/* Initialize max aggregatable pkts per session based on MTU */
@@ -7447,7 +7452,7 @@ static void s2io_link(struct s2io_nic *sp, int link)
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
- init_tti(sp, link);
+ init_tti(sp, link, false);
if (link == LINK_DOWN) {
DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
s2io_stop_all_tx_queue(sp);
@@ -7604,7 +7609,7 @@ static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, true);
}
static const struct net_device_ops s2io_netdev_ops = {
@@ -7613,7 +7618,7 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_get_stats = s2io_get_stats,
.ndo_start_xmit = s2io_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = s2io_set_multicast,
+ .ndo_set_rx_mode = s2io_ndo_set_multicast,
.ndo_do_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
@@ -7929,7 +7934,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
writeq(val64, &bar0->rmac_addr_cmd_mem);
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, true);
tmp64 = readq(&bar0->rmac_addr_data0_mem);
mac_down = (u32)tmp64;
mac_up = (u32) (tmp64 >> 32);
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 6fa3159a977f..5a6032212c19 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1066,7 +1066,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
-static void s2io_set_multicast(struct net_device *dev);
+static void s2io_set_multicast(struct net_device *dev, bool may_sleep);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link);
static void s2io_reset(struct s2io_nic * sp);
@@ -1087,7 +1087,7 @@ static int s2io_set_swapper(struct s2io_nic * sp);
static void s2io_card_down(struct s2io_nic *nic);
static int s2io_card_up(struct s2io_nic *nic);
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state);
+ int bit_state, bool may_sleep);
static int s2io_add_isr(struct s2io_nic * sp);
static void s2io_rem_isr(struct s2io_nic * sp);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index f5d48d7c4ce2..da48dd85770c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -1121,7 +1121,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
list_for_each_safe(p, n, &blockpool->free_entry_list) {
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree((void *)p);
+ kfree(p);
}
return;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 76c51da5b66f..9b32ae46011c 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -295,8 +295,8 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
ipv6 = true;
break;
}
-#endif
fallthrough;
+#endif
case AF_INET:
req_sz = sizeof(struct nfp_crypto_req_add_v4);
ipv6 = false;
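
The move matters for IPv6-less builds: with the annotation outside the
guard, compiling the guarded AF_INET6 case away left a stray

	switch (sk->sk_family) {
	fallthrough;
	case AF_INET:

where the fallthrough annotation is unreachable (nothing precedes it to
fall through from), which newer compilers reject. Keeping it inside the
#if block ties its existence to the case it belongs to.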
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 7ff2ccbd43b0..e672614d2906 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -724,10 +724,8 @@ static int nfp_pci_probe(struct pci_dev *pdev,
}
pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
- if (IS_ERR_OR_NULL(pf->cpp)) {
+ if (IS_ERR(pf->cpp)) {
err = PTR_ERR(pf->cpp);
- if (err >= 0)
- err = -ENOMEM;
goto err_disable_msix;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 2fc10a36afa4..8724d6a9ed02 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1043,8 +1043,7 @@ static int using_multi_irqs(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
return 0;
else
return 1;
@@ -1666,11 +1665,7 @@ static void nv_update_stats(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- /* If it happens that this is run in top-half context, then
- * replace the spin_lock of hwstats_lock with
- * spin_lock_irqsave() in calling functions. */
- WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
- assert_spin_locked(&np->hwstats_lock);
+ lockdep_assert_held(&np->hwstats_lock);
/* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
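
lockdep_assert_held() subsumes both removed checks: it verifies that the
*current* context holds the lock (assert_spin_locked() only verified
that somebody did) and it compiles away entirely when lockdep is
disabled, so the hand-rolled in_irq() warning is no longer needed. The
general shape, with stats_refresh as an illustrative name:

	static void stats_refresh(struct fe_priv *np)
	{
		lockdep_assert_held(&np->hwstats_lock); /* no-op w/o lockdep */
		/* ... update np->estats ... */
	}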
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ade8c44c01cd..b9e32e4478a8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2412,7 +2412,6 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
u32 wufc = adapter->wake_up_evt;
- int retval = 0;
netif_device_detach(netdev);
if (netif_running(netdev))
@@ -2432,7 +2431,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
}
- return retval;
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index dc5fbc2704f3..318db5f77fdb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -25,7 +25,7 @@ static void ionic_watchdog_cb(struct timer_list *t)
hb = ionic_heartbeat_check(ionic);
if (hb >= 0)
- ionic_link_status_check_request(ionic->lif, false);
+ ionic_link_status_check_request(ionic->lif, CAN_NOT_SLEEP);
}
void ionic_init_devinfo(struct ionic *ionic)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 6c243b17312c..690768ff0143 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,8 +12,10 @@
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
-#define IONIC_MIN_TXRX_DESC 16
+#define IONIC_MIN_TXRX_DESC 64
#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_RX_FILL_THRESHOLD 16
+#define IONIC_RX_FILL_DIV 8
#define IONIC_LIFS_MAX 1024
#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index a12df3946a07..deabd813e3fe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -123,6 +123,12 @@ static void ionic_link_status_check(struct ionic_lif *lif)
link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
if (link_up) {
+ if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ mutex_lock(&lif->queue_lock);
+ ionic_start_queues(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
+
if (!netif_carrier_ok(netdev)) {
u32 link_speed;
@@ -132,12 +138,6 @@ static void ionic_link_status_check(struct ionic_lif *lif)
link_speed / 1000);
netif_carrier_on(netdev);
}
-
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
- mutex_lock(&lif->queue_lock);
- ionic_start_queues(lif);
- mutex_unlock(&lif->queue_lock);
- }
} else {
if (netif_carrier_ok(netdev)) {
netdev_info(netdev, "Link down\n");
@@ -1074,22 +1074,22 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
}
static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
}
static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
}
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
@@ -1129,38 +1129,10 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
lif->rx_mode = rx_mode;
}
-static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
- bool from_ndo)
-{
- struct ionic_deferred_work *work;
-
- if (from_ndo) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "%s OOM\n", __func__);
- return;
- }
- work->type = IONIC_DW_TYPE_RX_MODE;
- work->rx_mode = rx_mode;
- netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- ionic_lif_rx_mode(lif, rx_mode);
- }
-}
-
-static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
-{
- if (from_ndo)
- __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
- else
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
-
-}
-
-static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_deferred_work *work;
unsigned int nfilters;
unsigned int rx_mode;
@@ -1177,7 +1149,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- ionic_dev_uc_sync(netdev, from_ndo);
+ if (can_sleep)
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
+ else
+ __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
@@ -1189,7 +1164,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
}
/* same for multicast */
- ionic_dev_uc_sync(netdev, from_ndo);
+ if (can_sleep)
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
+ else
+ __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
@@ -1200,13 +1178,26 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
}
- if (lif->rx_mode != rx_mode)
- _ionic_lif_rx_mode(lif, rx_mode, from_ndo);
+ if (lif->rx_mode != rx_mode) {
+ if (!can_sleep) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "%s OOM\n", __func__);
+ return;
+ }
+ work->type = IONIC_DW_TYPE_RX_MODE;
+ work->rx_mode = rx_mode;
+ netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_lif_rx_mode(lif, rx_mode);
+ }
+ }
}
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
- ionic_set_rx_mode(netdev, true);
+ ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -1625,6 +1616,24 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}
+static void ionic_lif_quiesce(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_STATE,
+ .state = IONIC_LIF_QUIESCE,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
+}
+
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
@@ -1639,6 +1648,8 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++)
err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
}
+
+ ionic_lif_quiesce(lif);
}
static void ionic_txrx_deinit(struct ionic_lif *lif)
@@ -1773,7 +1784,7 @@ static int ionic_txrx_init(struct ionic_lif *lif)
if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_init(lif);
- ionic_set_rx_mode(lif->netdev, false);
+ ionic_set_rx_mode(lif->netdev, CAN_SLEEP);
return 0;
@@ -2781,7 +2792,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, true, true);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -2798,7 +2809,7 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, true, true);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
return 0;
}
@@ -2959,6 +2970,8 @@ int ionic_lif_register(struct ionic_lif *lif)
dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
return err;
}
+
+ ionic_link_status_check_request(lif, true);
lif->registered = true;
ionic_lif_set_netdev_info(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 0224dfd24b8a..9bed42719ae5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -13,6 +13,12 @@
#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
+
+#define ADD_ADDR true
+#define DEL_ADDR false
+#define CAN_SLEEP true
+#define CAN_NOT_SLEEP false
+
#define IONIC_RX_COPYBREAK_DEFAULT 256
#define IONIC_TX_BUDGET_DEFAULT 256
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index b3d2250c77d0..9156c9825a16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -392,11 +392,6 @@ void ionic_rx_fill(struct ionic_queue *q)
q->dbval | q->head_idx);
}
-static void ionic_rx_fill_cb(void *arg)
-{
- ionic_rx_fill(arg);
-}
-
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
@@ -480,6 +475,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *cq = napi_to_cq(napi);
struct ionic_dev *idev;
struct ionic_lif *lif;
+ u16 rx_fill_threshold;
u32 work_done = 0;
u32 flags = 0;
@@ -489,7 +485,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL);
- if (work_done)
+ rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
+ cq->num_descs / IONIC_RX_FILL_DIV);
+ if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -518,6 +516,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ u16 rx_fill_threshold;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
@@ -531,8 +530,11 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
- if (rx_work_done)
- ionic_rx_fill_cb(rxcq->bound_q);
+
+ rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
+ rxcq->num_descs / IONIC_RX_FILL_DIV);
+ if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
+ ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(qcq);
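
Worked numbers for the new refill gate,
threshold = min(IONIC_RX_FILL_THRESHOLD, num_descs / IONIC_RX_FILL_DIV):

	num_descs = 4096 (IONIC_DEF_TXRX_DESC): min(16, 512) = 16
	num_descs =   64 (IONIC_MIN_TXRX_DESC): min(16,   8) =  8

Instead of refilling (and ringing the doorbell) after every serviced
completion, the queue now batches until at least that many slots are
free; the raised 64-descriptor minimum keeps the divisor-based term
meaningful on small rings.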
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 85d9c3e30c69..1acf7128c146 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -67,7 +67,7 @@
#define R8169_REGS_SIZE 256
#define R8169_RX_BUF_SIZE (SZ_16K - 1)
-#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
+#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -584,12 +584,6 @@ enum rtl_flag {
RTL_FLAG_MAX
};
-struct rtl8169_stats {
- u64 packets;
- u64 bytes;
- struct u64_stats_sync syncp;
-};
-
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
@@ -600,8 +594,6 @@ struct rtl8169_private {
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
- struct rtl8169_stats rx_stats;
- struct rtl8169_stats tx_stats;
struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
@@ -700,27 +692,6 @@ static bool rtl_supports_eee(struct rtl8169_private *tp)
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
-static void rtl_get_priv_stats(struct rtl8169_stats *stats,
- u64 *pkts, u64 *bytes)
-{
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- *pkts = stats->packets;
- *bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-}
-
-static void rtl_inc_priv_stats(struct rtl8169_stats *stats,
- u64 pkts, u64 bytes)
-{
- u64_stats_update_begin(&stats->syncp);
- stats->packets += pkts;
- stats->bytes += bytes;
- u64_stats_update_end(&stats->syncp);
-}
-
static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
{
int i;
@@ -4170,13 +4141,13 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true;
}
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
- unsigned int nr_frags)
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
{
- unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
+ unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
+ - READ_ONCE(tp->cur_tx);
/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
- return slots_avail > nr_frags;
+ return slots_avail > MAX_SKB_FRAGS;
}
/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
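
cur_tx and dirty_tx are free-running (never masked) u32 indices, so the
unsigned subtraction stays correct across wraparound. A worked case with
the new NUM_TX_DESC of 256:

	dirty_tx = 0xffffff80	/* consumer, about to wrap   */
	cur_tx   = 0x00000020	/* producer, already wrapped */
	slots_avail = 0xffffff80 + 256 - 0x20 = 96	(mod 2^32)

which matches 256 minus the 160 descriptors in flight. Comparing against
the constant MAX_SKB_FRAGS rather than the skb's nr_frags means an awake
queue always has room for a worst-case skb (MAX_SKB_FRAGS fragments plus
one descriptor), which is what allows dropping the per-skb parameter.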
@@ -4209,17 +4180,12 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
bool stop_queue, door_bell;
u32 opts[2];
- txd_first = tp->TxDescArray + entry;
-
- if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
+ if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
- if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
- goto err_stop_0;
-
opts[1] = rtl8169_tx_vlan_tag(skb);
opts[0] = 0;
@@ -4232,6 +4198,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
entry, false)))
goto err_dma_0;
+ txd_first = tp->TxDescArray + entry;
+
if (frags) {
if (rtl8169_xmit_frags(tp, skb, opts, entry))
goto err_dma_1;
@@ -4254,22 +4222,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
/* rtl_tx needs to see descriptor changes before updated tp->cur_tx */
smp_wmb();
- tp->cur_tx += frags + 1;
+ WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
- stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ stop_queue = !rtl_tx_slots_avail(tp);
if (unlikely(stop_queue)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
smp_wmb();
netif_stop_queue(dev);
- door_bell = true;
- }
-
- if (door_bell)
- rtl8169_doorbell(tp);
-
- if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -4277,11 +4238,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* status and forget to wake up queue, a racing rtl_tx thread
* can't.
*/
- smp_mb();
- if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
+ smp_mb__after_atomic();
+ if (rtl_tx_slots_avail(tp))
netif_start_queue(dev);
+ door_bell = true;
}
+ if (door_bell)
+ rtl8169_doorbell(tp);
+
return NETDEV_TX_OK;
err_dma_1:
@@ -4390,12 +4355,11 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
int budget)
{
- unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
+ unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
- smp_rmb();
- for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
+ while (READ_ONCE(tp->cur_tx) != dirty_tx) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
struct sk_buff *skb = tp->tx_skb[entry].skb;
u32 status;
@@ -4416,10 +4380,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
netdev_completed_queue(dev, pkts_compl, bytes_compl);
+ dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
- rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl);
-
- tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
* - refresh cur_tx ring index and queue status (read barrier)
@@ -4427,11 +4389,9 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* a racing xmit thread can only have a right view of the
* ring status.
*/
- smp_mb();
- if (netif_queue_stopped(dev) &&
- rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+ smp_store_mb(tp->dirty_tx, dirty_tx);
+ if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
netif_wake_queue(dev);
- }
/*
* 8168 hack: TxPoll requests are lost when the Tx packets are
* too close. Let's kick an extra TxPoll request when a burst
@@ -4539,7 +4499,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
napi_gro_receive(&tp->napi, skb);
- rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size);
+ dev_sw_netstats_rx_add(dev, pkt_size);
release_descriptor:
rtl8169_mark_to_asic(desc);
}
@@ -4721,6 +4681,7 @@ static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ unsigned long irqflags;
int retval = -ENOMEM;
pm_runtime_get_sync(&pdev->dev);
@@ -4732,7 +4693,7 @@ static int rtl_open(struct net_device *dev)
tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
&tp->TxPhyAddr, GFP_KERNEL);
if (!tp->TxDescArray)
- goto err_pm_runtime_put;
+ goto out;
tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
&tp->RxPhyAddr, GFP_KERNEL);
@@ -4745,8 +4706,9 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
+ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- IRQF_SHARED, dev->name, tp);
+ irqflags, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
@@ -4757,9 +4719,9 @@ static int rtl_open(struct net_device *dev)
rtl8169_up(tp);
rtl8169_init_counter_offsets(tp);
netif_start_queue(dev);
-
- pm_runtime_put_sync(&pdev->dev);
out:
+ pm_runtime_put_sync(&pdev->dev);
+
return retval;
err_free_irq:
@@ -4775,8 +4737,6 @@ err_free_tx_0:
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
tp->TxPhyAddr);
tp->TxDescArray = NULL;
-err_pm_runtime_put:
- pm_runtime_put_noidle(&pdev->dev);
goto out;
}
@@ -4790,9 +4750,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
netdev_stats_to_stats64(stats, &dev->stats);
-
- rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes);
- rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes);
+ dev_fetch_sw_netstats(stats, dev->tstats);
/*
* Fetch additional counter values missing in stats collected by driver
@@ -5263,6 +5221,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
+ dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
+ struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
if (rc)
@@ -5340,8 +5303,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
INIT_WORK(&tp->wk.work, rtl_task);
- u64_stats_init(&tp->rx_stats.syncp);
- u64_stats_init(&tp->tx_stats.syncp);
rtl_init_mac_address(tp);
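
The driver-private rtl8169_stats pair is replaced by the core per-cpu
software netstats helpers; the generic pattern, assembled from the hunks
above:

	/* probe */
	dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
						   struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	/* hot paths: per-cpu counters, no driver-managed syncp */
	dev_sw_netstats_rx_add(dev, pkt_size);
	dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);

	/* .ndo_get_stats64 */
	netdev_stats_to_stats64(stats, &dev->stats);
	dev_fetch_sw_netstats(stats, dev->tstats);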
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 2590cab53e54..1f981dfe4bdc 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -285,7 +285,13 @@ typedef union efx_oword {
field10, value10, \
field11, value11, \
field12, value12, \
- field13, value13) \
+ field13, value13, \
+ field14, value14, \
+ field15, value15, \
+ field16, value16, \
+ field17, value17, \
+ field18, value18, \
+ field19, value19) \
(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
@@ -298,7 +304,13 @@ typedef union efx_oword {
EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \
- EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)))
+ EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19)))
#define EFX_INSERT_FIELDS64(...) \
cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
@@ -340,7 +352,19 @@ typedef union efx_oword {
#endif
/* Populate an octword field with various numbers of arguments */
-#define EFX_POPULATE_OWORD_13 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_18(oword, ...) \
+ EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_17(oword, ...) \
+ EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_16(oword, ...) \
+ EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_15(oword, ...) \
+ EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_14(oword, ...) \
+ EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_13(oword, ...) \
+ EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_12(oword, ...) \
EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_11(oword, ...) \
@@ -375,7 +399,19 @@ typedef union efx_oword {
EFX_DWORD_3, 0xffffffff)
/* Populate a quadword field with various numbers of arguments */
-#define EFX_POPULATE_QWORD_13 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_18(qword, ...) \
+ EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_17(qword, ...) \
+ EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_16(qword, ...) \
+ EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_15(qword, ...) \
+ EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_14(qword, ...) \
+ EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_13(qword, ...) \
+ EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_12(qword, ...) \
EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_11(qword, ...) \
@@ -408,7 +444,19 @@ typedef union efx_oword {
EFX_DWORD_1, 0xffffffff)
/* Populate a dword field with various numbers of arguments */
-#define EFX_POPULATE_DWORD_13 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_18(dword, ...) \
+ EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_17(dword, ...) \
+ EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_16(dword, ...) \
+ EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_15(dword, ...) \
+ EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_14(dword, ...) \
+ EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_13(dword, ...) \
+ EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_12(dword, ...) \
EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_11(dword, ...) \
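
How the padding cascade works: each _N variant prepends one
(EFX_DUMMY_FIELD, 0) pair and chains to _N+1, so only the 19-argument
EFX_INSERT_FIELDS_NATIVE body has to be maintained. For example:

	EFX_POPULATE_OWORD_17(reg, F1, v1, ..., F17, v17)
	-> EFX_POPULATE_OWORD_18(reg, EFX_DUMMY_FIELD, 0, F1, v1, ...)
	-> EFX_POPULATE_OWORD_19(reg, EFX_DUMMY_FIELD, 0,
				 EFX_DUMMY_FIELD, 0, F1, v1, ...)

Inserting a dummy field contributes no bits, so the pad pairs are
no-ops and the expansion is identical to writing the 17 real fields.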
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 3148fe770356..518268ce2064 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -182,8 +182,20 @@ static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
if (rc)
return rc;
- if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3))
- efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) {
+ struct net_device *net_dev = efx->net_dev;
+ netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;
+
+ net_dev->features |= tso;
+ net_dev->hw_features |= tso;
+ net_dev->hw_enc_features |= tso;
+ /* EF100 HW can only offload outer checksums if they are UDP,
+ * so for GRE_CSUM we have to use GSO_PARTIAL.
+ */
+ net_dev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ }
efx->num_mac_stats = MCDI_WORD(outbuf,
GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
netif_dbg(efx, probe, efx->net_dev,
@@ -686,7 +698,7 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
- NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)
+ NETIF_F_HW_VLAN_CTAG_TX)
const struct efx_nic_type ef100_pf_nic_type = {
.revision = EFX_REV_EF100,
@@ -1101,6 +1113,9 @@ static int ef100_probe_main(struct efx_nic *efx)
nic_data->efx = efx;
net_dev->features |= efx->type->offload_features;
net_dev->hw_features |= efx->type->offload_features;
+ net_dev->hw_enc_features |= efx->type->offload_features;
+ net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
/* Populate design-parameter defaults */
nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index a90e5a9d2a37..26ef51d6b542 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -54,8 +54,6 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct efx_nic *efx = tx_queue->efx;
struct ef100_nic_data *nic_data;
struct efx_tx_buffer *buffer;
- struct tcphdr *tcphdr;
- struct iphdr *iphdr;
size_t header_len;
u32 mss;
@@ -98,20 +96,6 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
buffer->unmap_len = 0;
buffer->skb = skb;
++tx_queue->insert_count;
-
- /* Adjust the TCP checksum to exclude the total length, since we set
- * ED_INNER_IP_LEN in the descriptor.
- */
- tcphdr = tcp_hdr(skb);
- if (skb_is_gso_v6(skb)) {
- tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- } else {
- iphdr = ip_hdr(skb);
- tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr,
- 0, IPPROTO_TCP, 0);
- }
return true;
}
@@ -203,34 +187,66 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
struct efx_tx_buffer *buffer, efx_oword_t *txd,
unsigned int segment_count)
{
- u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ?
- ESE_GZ_TX_DESC_IP4_ID_NO_OP :
- ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
- u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ?
- skb_vlan_tag_present(skb) : 0;
+ bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
+ unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
u32 mss = skb_shinfo(skb)->gso_size;
+ bool encap = skb->encapsulation;
+ bool udp_encap = false;
+ u16 vlan_enable = 0;
+ struct tcphdr *tcp;
+ bool outer_csum;
+ u32 paylen;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ vlan_enable = skb_vlan_tag_present(skb);
len = skb->len - buffer->len;
/* We use 1 for the TSO descriptor and 1 for the header */
payload_segs = segment_count - 2;
- ip_offset = skb_network_offset(skb);
- tcp_offset = skb_transport_offset(skb);
+ if (encap) {
+ outer_ip_offset = skb_network_offset(skb);
+ outer_l4_offset = skb_transport_offset(skb);
+ ip_offset = skb_inner_network_offset(skb);
+ tcp_offset = skb_inner_transport_offset(skb);
+ if (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))
+ udp_encap = true;
+ } else {
+ ip_offset = skb_network_offset(skb);
+ tcp_offset = skb_transport_offset(skb);
+ outer_ip_offset = outer_l4_offset = 0;
+ }
+ outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM;
+
+ /* subtract TCP payload length from inner checksum */
+ tcp = (void *)skb->data + tcp_offset;
+ paylen = skb->len - tcp_offset;
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
- EFX_POPULATE_OWORD_13(*txd,
+ EFX_POPULATE_OWORD_19(*txd,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO,
ESF_GZ_TX_TSO_MSS, mss,
ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1,
ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs,
ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1,
ESF_GZ_TX_TSO_PAYLOAD_LEN, len,
+ ESF_GZ_TX_TSO_CSO_OUTER_L4, outer_csum,
ESF_GZ_TX_TSO_CSO_INNER_L4, 1,
ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1,
ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1,
ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid,
ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
+ ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_ip_offset >> 1,
+ ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
+ ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
+ ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
+ ESE_GZ_TX_DESC_IP4_ID_NO_OP,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
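[csum_replace_by_diff() in the hunk above folds a ones'-complement difference into an existing checksum, letting the driver strip the payload length out of the inner TCP pseudo-header checksum without recomputing it from scratch. A hedged standalone sketch of that incremental update, following RFC 1624 (~HC' = ~HC + ~m + m'); the kernel helper does the equivalent internally:]

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624: update a checksum when one 16-bit word changes.
 * check holds the complemented sum, as in a TCP/IP header. */
static uint16_t csum_update(uint16_t check, uint16_t old_word,
			    uint16_t new_word)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_word;
	sum += new_word;
	return (uint16_t)~fold(sum);
}

int main(void)
{
	/* checksum over two words {0x1234, 0x0028} */
	uint16_t check = ~fold(0x1234 + 0x0028);

	/* remove the 0x0028 "length" word, as the TSO path does */
	check = csum_update(check, 0x0028, 0x0000);
	printf("0x%04x vs 0x%04x\n", check, (uint16_t)~fold(0x1234));
	return 0;
}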
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index b77e427e6729..c52a38df0e0d 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -8,7 +8,7 @@ config NET_VENDOR_SMSC
default y
depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
ISA || MAC || MIPS || NIOS2 || PCI || \
- PCMCIA || SUPERH || XTENSA || H8300
+ PCMCIA || SUPERH || XTENSA || H8300 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,7 +39,7 @@ config SMC91X
select MII
depends on !OF || GPIOLIB
depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
- MIPS || NIOS2 || SUPERH || XTENSA || H8300
+ MIPS || NIOS2 || SUPERH || XTENSA || H8300 || COMPILE_TEST
help
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -78,7 +78,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on (ARM || SUPERH)
+ depends on (ARM || SUPERH || COMPILE_TEST)
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 01069dfaf75c..22cdbf12c823 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -102,7 +102,10 @@ MODULE_ALIAS("platform:smc911x");
#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, dev, args...) do { } while (0)
+#define DBG(n, dev, args...) \
+ while (0) { \
+ netdev_dbg(dev, args); \
+ }
#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
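[The rewritten DBG() above keeps its arguments visible to the compiler, so format strings stay type-checked even when debugging is compiled out, while the `while (0)` body is never executed and is optimized away. A small standalone sketch of the idea, using the GNU/C99 variadic-macro form the kernel relies on; a do { } while (0) wrapper would be the safer variant wherever dangling-else matters:]

#include <stdio.h>

#define DEBUG 0

#if DEBUG
#define DBG(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* Arguments are still parsed and type-checked, but never evaluated
 * at run time; the compiler removes the dead loop body entirely. */
#define DBG(fmt, ...)                       \
	while (0) {                         \
		printf(fmt, ##__VA_ARGS__); \
	}
#endif

int main(void)
{
	DBG("value = %d\n", 42);	/* compiles, emits nothing */
	return 0;
}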
@@ -462,9 +465,9 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
skb->len;
#else
- buf = (char*)((u32)skb->data & ~0x3);
- len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
- cmdA = (((u32)skb->data & 0x3) << 16) |
+ buf = (char *)((uintptr_t)skb->data & ~0x3);
+ len = (skb->len + 3 + ((uintptr_t)skb->data & 3)) & ~0x3;
+ cmdA = (((uintptr_t)skb->data & 0x3) << 16) |
TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
skb->len;
#endif
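[The (u32) casts replaced above would truncate pointers on 64-bit builds; uintptr_t is the integer type guaranteed to round-trip a pointer. A minimal standalone sketch of the same align-down arithmetic the TX path performs on skb->data:]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char data[16];
	char *p = data + 3;	/* deliberately misaligned */

	/* align the pointer down to a 4-byte boundary and keep the
	 * byte offset, as the smc911x TX path does with skb->data */
	char *buf = (char *)((uintptr_t)p & ~(uintptr_t)0x3);
	unsigned int offset = (uintptr_t)p & 0x3;

	/* typically prints 3 when data[] is at least 4-byte aligned */
	printf("offset into aligned buffer: %u\n", offset);
	return buf <= p ? 0 : 1;
}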
@@ -879,7 +882,7 @@ static void smc911x_phy_configure(struct work_struct *work)
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
int my_ad_caps; /* My Advertised capabilities */
- int status;
+ int status __always_unused;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
@@ -973,7 +976,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int phyaddr = lp->mii.phy_id;
- int status;
+ int status __always_unused;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -2044,8 +2047,6 @@ static int smc911x_drv_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
- /* ndev is not valid yet, so avoid passing it in. */
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f6b73afd1879..742a1f7a838c 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -703,7 +703,8 @@ static void smc_tx(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- unsigned int saved_packet, packet_no, tx_status, pkt_len;
+ unsigned int saved_packet, packet_no, tx_status;
+ unsigned int pkt_len __always_unused;
DBG(3, dev, "%s\n", __func__);
@@ -2191,6 +2192,12 @@ MODULE_DEVICE_TABLE(of, smc91x_match);
/**
* try_toggle_control_gpio - configure a gpio if it exists
+ * @dev: net device
+ * @desc: where to store the GPIO descriptor, if it exists
+ * @name: name of the GPIO in DT
+ * @index: index of the GPIO in DT
+ * @value: set the GPIO to this value
+ * @nsdelay: delay before setting the GPIO
*/
static int try_toggle_control_gpio(struct device *dev,
struct gpio_desc **desc,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index df7de50497a0..6f271c46368d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -402,6 +402,7 @@ struct dma_features {
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
#define STMMAC_DEFAULT_TWT_LS 0x1E
+#define STMMAC_ET_MAX 0xFFFFF
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 2342d497348e..27254b27d7ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -119,23 +119,23 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return 0;
}
-static void *dwc_qos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *stmmac_res)
+static int dwc_qos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
{
int err;
plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(plat_dat->stmmac_clk)) {
dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return ERR_CAST(plat_dat->stmmac_clk);
+ return PTR_ERR(plat_dat->stmmac_clk);
}
err = clk_prepare_enable(plat_dat->stmmac_clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
err);
- return ERR_PTR(err);
+ return err;
}
plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
@@ -152,11 +152,11 @@ static void *dwc_qos_probe(struct platform_device *pdev,
goto disable;
}
- return NULL;
+ return 0;
disable:
clk_disable_unprepare(plat_dat->stmmac_clk);
- return ERR_PTR(err);
+ return err;
}
static int dwc_qos_remove(struct platform_device *pdev)
@@ -267,19 +267,17 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void *tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
- struct stmmac_resources *res)
+static int tegra_eqos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
struct tegra_eqos *eqos;
int err;
eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
- if (!eqos) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eqos)
+ return -ENOMEM;
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
@@ -368,9 +366,7 @@ bypass_clk_reset_gpio:
if (err < 0)
goto reset;
-out:
- return eqos;
-
+ return 0;
reset:
reset_control_assert(eqos->rst);
reset_phy:
@@ -384,8 +380,7 @@ disable_slave:
disable_master:
clk_disable_unprepare(eqos->clk_master);
error:
- eqos = ERR_PTR(err);
- goto out;
+ return err;
}
static int tegra_eqos_remove(struct platform_device *pdev)
@@ -403,9 +398,9 @@ static int tegra_eqos_remove(struct platform_device *pdev)
}
struct dwc_eth_dwmac_data {
- void *(*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
- struct stmmac_resources *res);
+ int (*probe)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res);
int (*remove)(struct platform_device *pdev);
};
@@ -424,7 +419,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- void *priv;
int ret;
data = device_get_match_data(&pdev->dev);
@@ -448,10 +442,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- priv = data->probe(pdev, plat_dat, &stmmac_res);
- if (IS_ERR(priv)) {
- ret = PTR_ERR(priv);
-
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
+ if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to probe subdriver: %d\n",
ret);
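[The probe callbacks above used to smuggle errno values through a void * with ERR_PTR()/PTR_ERR(); since no caller consumed the pointer itself, a plain int return is simpler and harder to misuse. A toy re-implementation of the kernel's encoding, just to show what the round-trip costs; the helpers mirror include/linux/err.h semantics but are written out here for a standalone build:]

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Errno values live in the last page of the address space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *probe_ptr(int fail)	/* old style */
{
	return fail ? ERR_PTR(-ENOMEM) : NULL;
}

static int probe_int(int fail)		/* new style: no decoding */
{
	return fail ? -ENOMEM : 0;
}

int main(void)
{
	void *p = probe_ptr(1);

	if (IS_ERR(p))
		printf("ptr style: %ld, int style: %d\n",
		       PTR_ERR(p), probe_int(1));
	return 0;
}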
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 81ee0a071b4e..a2e80c89de2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -236,6 +236,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
int ret;
int i;
+ plat->phy_addr = -1;
plat->clk_csr = 5;
plat->has_gmac = 0;
plat->has_gmac4 = 1;
@@ -345,7 +346,6 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
@@ -362,7 +362,6 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
return ehl_common_data(pdev, plat);
@@ -376,7 +375,6 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_addr = 1;
return ehl_common_data(pdev, plat);
}
@@ -408,7 +406,6 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
- plat->phy_addr = 1;
return ehl_common_data(pdev, plat);
}
@@ -450,7 +447,6 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 5afcf05bbf9c..dc0b8b6d180d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -299,7 +299,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
dev_err(dwmac->dev, "unsupported phy-mode %s\n",
phy_modes(dwmac->phy_mode));
return -EINVAL;
- };
+ }
if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
if (!dwmac->timing_adj_clk) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 592b043f9676..82df91c130f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -176,9 +176,11 @@ enum power_event {
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
+#define GMAC4_LPI_ENTRY_TIMER 0xd8
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
+#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 002791b77356..3ed4f4cda7f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -379,6 +379,27 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
+static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int value = et & STMMAC_ET_MAX;
+ int regval;
+
+ /* Program LPI entry timer value into register */
+ writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
+
+ /* Enable/disable LPI entry timer */
+ regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+
+ if (et)
+ regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
+ else
+ regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
+
+ writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1164,6 +1185,7 @@ const struct stmmac_ops dwmac4_ops = {
.get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1206,6 +1228,7 @@ const struct stmmac_ops dwmac410_ops = {
.get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1249,6 +1272,7 @@ const struct stmmac_ops dwmac510_ops = {
.get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e2dca9b6e992..b40b2e0667bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -337,6 +337,7 @@ struct stmmac_ops {
void (*set_eee_mode)(struct mac_device_info *hw,
bool en_tx_lpi_clockgating);
void (*reset_eee_mode)(struct mac_device_info *hw);
+ void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
@@ -439,6 +440,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
#define stmmac_reset_eee_mode(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
+#define stmmac_set_eee_lpi_timer(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 727e68dfaf1c..c88ee8ea4245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -207,6 +207,7 @@ struct stmmac_priv {
int tx_lpi_timer;
int tx_lpi_enabled;
int eee_tw_timer;
+ bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ba45fe237512..c8770e9668a1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -294,6 +294,16 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
+static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+{
+ int tx_lpi_timer;
+
+ /* Clear/set the SW EEE timer flag based on LPI ET enablement */
+ priv->eee_sw_timer_en = en ? 0 : 1;
+ tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+}
+
/**
* stmmac_enable_eee_mode - check and enter in LPI mode
* @priv: driver private structure
@@ -327,6 +337,11 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
*/
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
+ if (!priv->eee_sw_timer_en) {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ return;
+ }
+
stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
@@ -376,6 +391,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if (!priv->eee_active) {
if (priv->eee_enabled) {
netdev_dbg(priv->dev, "disable EEE\n");
+ stmmac_lpi_entry_timer_config(priv, 0);
del_timer_sync(&priv->eee_ctrl_timer);
stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
}
@@ -389,7 +405,15 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
eee_tw_timer);
}
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+ stmmac_lpi_entry_timer_config(priv, 1);
+ } else {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
@@ -2044,7 +2068,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
}
- if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
+ priv->eee_sw_timer_en) {
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
@@ -3306,7 +3331,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q = &priv->tx_queue[queue];
first_tx = tx_q->cur_tx;
- if (priv->tx_path_in_lpi_mode)
+ if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
stmmac_disable_eee_mode(priv);
/* Manage oversized TCP frames for GMAC4 device */
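[The stmmac_eee_init() change above picks the hardware LPI entry timer only on GMAC4-class MACs and only when the requested timeout fits the 20-bit LPI_ENTRY_TIMER register (STMMAC_ET_MAX); anything larger falls back to the software kernel timer. A toy decision helper restating that policy; the names are illustrative, not the driver's:]

#include <stdio.h>

#define ET_MAX 0xFFFFF		/* 20-bit HW LPI entry timer limit */

enum lpi_mode { LPI_HW_TIMER, LPI_SW_TIMER };

/* Prefer the HW timer when present and the timeout is encodable;
 * otherwise arm a software timer, as stmmac_eee_init() does. */
static enum lpi_mode pick_lpi_mode(int has_hw_et, unsigned int timeout)
{
	return (has_hw_et && timeout <= ET_MAX) ? LPI_HW_TIMER
						: LPI_SW_TIMER;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_lpi_mode(1, 1000),		/* HW */
	       pick_lpi_mode(1, 0x100000),	/* SW: too large */
	       pick_lpi_mode(0, 1000));		/* SW: no HW timer */
	return 0;
}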
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index af34a4cadbb0..6dc9f10414e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -399,6 +399,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ void *ret;
int rc;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
@@ -576,12 +577,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
clk_prepare_enable(plat->stmmac_clk);
}
- plat->pclk = devm_clk_get(&pdev->dev, "pclk");
+ plat->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
if (IS_ERR(plat->pclk)) {
- if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
- goto error_pclk_get;
-
- plat->pclk = NULL;
+ ret = plat->pclk;
+ goto error_pclk_get;
}
clk_prepare_enable(plat->pclk);
@@ -596,14 +595,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
}
- plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
- STMMAC_RESOURCE_NAME);
+ plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
if (IS_ERR(plat->stmmac_rst)) {
- if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
- goto error_hw_init;
-
- dev_info(&pdev->dev, "no reset control found\n");
- plat->stmmac_rst = NULL;
+ ret = plat->stmmac_rst;
+ goto error_hw_init;
}
return plat;
@@ -613,7 +609,7 @@ error_hw_init:
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
- return ERR_PTR(-EPROBE_DEFER);
+ return ret;
}
/**
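[devm_clk_get_optional() and devm_reset_control_get_optional() return NULL when the resource is simply absent and an ERR_PTR only for real failures (including -EPROBE_DEFER), which is what lets the hand-rolled -EPROBE_DEFER checks above disappear. A hedged fragment of the resulting pattern, assuming an enclosing function that returns a pointer, as stmmac_probe_config_dt() does:]

struct clk *clk;

clk = devm_clk_get_optional(&pdev->dev, "pclk");
if (IS_ERR(clk))
	return ERR_CAST(clk);	/* real errors only, incl. -EPROBE_DEFER */

/* A NULL "optional" clock is a valid handle: clk_prepare_enable(NULL)
 * succeeds as a no-op, so no further special-casing is needed. */
clk_prepare_enable(clk);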
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 501d676fd88b..766e8866bbef 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -241,8 +241,8 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!vid)
unreg_mcast = port_mask;
dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
- ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
- unreg_mcast, port_mask, 0);
+ ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
+ unreg_mcast, port_mask, 0);
pm_runtime_put(common->dev);
return ret;
@@ -252,6 +252,7 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
if (!netif_running(ndev) || !vid)
@@ -264,14 +265,15 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
}
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
- ret = cpsw_ale_del_vlan(common->ale, vid, 0);
+ ret = cpsw_ale_del_vlan(common->ale, vid,
+ BIT(port->port_id) | ALE_PORT_HOST);
pm_runtime_put(common->dev);
return ret;
}
-static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
- bool promisc)
+static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
+ bool promisc)
{
struct am65_cpsw_common *common = port->common;
@@ -296,7 +298,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
bool promisc;
promisc = !!(ndev->flags & IFF_PROMISC);
- am65_cpsw_slave_set_promisc_2g(port, promisc);
+ am65_cpsw_slave_set_promisc(port, promisc);
if (promisc)
return;
@@ -373,7 +375,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -426,9 +428,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
writel(common->rx_flow_id_base,
host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
/* en tx crc offload */
- if (features & NETIF_F_HW_CSUM)
- writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
am65_cpsw_nuss_set_p0_ptype(common);
@@ -629,13 +629,13 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
- if (port->slave.mac_only)
+ if (port->slave.mac_only) {
/* enable mac-only mode on port */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_MACONLY, 1);
- if (AM65_CPSW_IS_CPSW2G(common))
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_NOLEARN, 1);
+ }
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
@@ -767,7 +767,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
return ret;
}
- if (desc_dma & 0x1) {
+ if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
return 0;
}
@@ -911,10 +911,57 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static struct sk_buff *
+am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_tx;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+
+ ndev = skb->dev;
+
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return skb;
+}
+
+static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
+ struct netdev_queue *netif_txq)
+{
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* Check whether the queue is stopped due to stalled
+ * TX DMA; if so, wake the queue, since free TX
+ * descriptors are available again.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+}
+
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget)
{
- struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
@@ -923,41 +970,68 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &common->tx_chns[chn];
while (true) {
- struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
-
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ spin_unlock(&tx_chn->lock);
if (res == -ENODATA)
break;
- if (desc_dma & 0x1) {
+ if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
break;
}
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
-
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+ total_bytes = skb->len;
ndev = skb->dev;
+ napi_consume_skb(skb, budget);
+ num_tx++;
- am65_cpts_tx_timestamp(common->cpts, skb);
+ netif_txq = netdev_get_tx_queue(ndev, chn);
- ndev_priv = netdev_priv(ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
+ int chn, unsigned int budget)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ break;
+ }
+
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+
+ ndev = skb->dev;
total_bytes += skb->len;
napi_consume_skb(skb, budget);
num_tx++;
@@ -970,19 +1044,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
- if (netif_tx_queue_stopped(netif_txq)) {
- /* Check whether the queue is stopped due to stalled tx dma,
- * if the queue is stopped then wake the queue as
- * we have free desc for tx
- */
- __netif_tx_lock(netif_txq, smp_processor_id());
- if (netif_running(ndev) &&
- (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS))
- netif_tx_wake_queue(netif_txq);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
- __netif_tx_unlock(netif_txq);
- }
dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
return num_tx;
@@ -993,8 +1056,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
int num_tx;
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
- budget);
+ if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
+ num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
+ else
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
+
num_tx = min(num_tx, budget);
if (num_tx < budget) {
napi_complete(napi_tx);
@@ -1139,7 +1205,13 @@ done_tx:
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
- ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ spin_unlock_bh(&tx_chn->lock);
+ }
if (ret) {
dev_err(dev, "can't push desc %d\n", ret);
/* inform bql */
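[With multiple ports sharing the same TX DMA channels, descriptor push/pop become multi-producer operations and need the new tx_chn->lock; on single-port CPSW2G the lock is skipped on the hot path, as the hunk above shows. A toy standalone model of that single- vs multi-producer split, with a pthread spinlock standing in for the kernel one (build with -lpthread):]

#include <pthread.h>
#include <stdio.h>

struct tx_chn {
	pthread_spinlock_t lock;
	int ring[64];
	int head;
	int multi_port;		/* more than one producer? */
};

static void push_desc(struct tx_chn *c, int desc)
{
	if (!c->multi_port) {	/* single producer: no lock needed */
		c->ring[c->head++ % 64] = desc;
		return;
	}
	pthread_spin_lock(&c->lock);
	c->ring[c->head++ % 64] = desc;
	pthread_spin_unlock(&c->lock);
}

int main(void)
{
	struct tx_chn c = { .head = 0, .multi_port = 1 };

	pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
	push_desc(&c, 1);
	printf("head = %d\n", c.head);
	pthread_spin_destroy(&c.lock);
	return 0;
}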
@@ -1369,32 +1441,7 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
- netdev_features_t features)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- netdev_features_t changes = features ^ ndev->features;
- struct am65_cpsw_host *host_p;
-
- host_p = am65_common_get_host(common);
-
- if (changes & NETIF_F_HW_CSUM) {
- bool enable = !!(features & NETIF_F_HW_CSUM);
-
- dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
- enable ? "ON" : "OFF");
- if (enable)
- writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
- else
- writel(0,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
- }
-
- return 0;
-}
-
-static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
.ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
@@ -1406,7 +1453,6 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
- .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
@@ -1417,7 +1463,6 @@ static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
if (!port->disabled)
return;
- common->disabled_ports_mask |= BIT(port->port_id);
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -1496,6 +1541,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
snprintf(tx_chn->tx_chn_name,
sizeof(tx_chn->tx_chn_name), "tx%d", i);
+ spin_lock_init(&tx_chn->lock);
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
@@ -1515,9 +1561,8 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
tx_chn->tx_chn_name,
&tx_cfg);
if (IS_ERR(tx_chn->tx_chn)) {
- ret = PTR_ERR(tx_chn->tx_chn);
- dev_err(dev, "Failed to request tx dma channel %d\n",
- ret);
+ ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
+ "Failed to request tx dma channel\n");
goto err;
}
@@ -1588,8 +1633,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
- ret = PTR_ERR(rx_chn->rx_chn);
- dev_err(dev, "Failed to request rx dma channel %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
+ "Failed to request rx dma channel\n");
goto err;
}
@@ -1606,7 +1651,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
};
struct k3_ring_cfg fdqring_cfg = {
.elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_MESSAGE,
.flags = K3_RINGACC_RING_SHARED,
};
struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
@@ -1620,6 +1664,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
i, &rx_flow_cfg);
@@ -1725,6 +1770,13 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
return ret;
}
common->cpts = cpts;
+ /* Forbid PM runtime if CPTS is running.
+ * K3 CPSWxG modules may completely lose context during ON->OFF
+ * transitions depending on integration:
+ * AM65x/J721E MCU CPSW2G: context retained (false)
+ * J721E MAIN_CPSW9G: context lost (true)
+ */
+ pm_runtime_forbid(dev);
return 0;
}
@@ -1778,8 +1830,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return PTR_ERR(port->slave.mac_sl);
port->disabled = !of_device_is_available(port_np);
- if (port->disabled)
+ if (port->disabled) {
+ common->disabled_ports_mask |= BIT(port->port_id);
continue;
+ }
port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
if (IS_ERR(port->slave.ifphy)) {
@@ -1795,12 +1849,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
- port_np, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register fixed-link phy %pOF\n",
+ port_np);
port->slave.phy_node = of_node_get(port_np);
} else {
port->slave.phy_node =
@@ -1833,6 +1885,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
of_node_put(node);
+ /* make sure at least one external port is available */
+ if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
+ dev_err(dev, "no external ports are available\n");
+ return -ENODEV;
+ }
+
return 0;
}
@@ -1843,14 +1901,18 @@ static void am65_cpsw_pcpu_stats_free(void *data)
free_percpu(stats);
}
-static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
+static int
+am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret;
- port = am65_common_get_port(common, 1);
+ port = &common->ports[port_idx];
+
+ if (port->disabled)
+ return 0;
/* alloc netdev */
port->ndev = devm_alloc_etherdev_mqs(common->dev,
@@ -1879,7 +1941,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
port->ndev->vlan_features |= NETIF_F_SG;
- port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
+ port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
/* Disable TX checksum offload by default due to HW bug */
@@ -1892,29 +1954,41 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
ndev_priv->stats);
- if (ret) {
- dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
- return ret;
+ if (ret)
+ dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+
+ if (!common->dma_ndev)
+ common->dma_ndev = port->ndev;
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ ret = am65_cpsw_nuss_init_port_ndev(common, i);
+ if (ret)
+ return ret;
}
- netif_napi_add(port->ndev, &common->napi_rx,
+ netif_napi_add(common->dma_ndev, &common->napi_rx,
am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
return ret;
}
-static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
+static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
- struct am65_cpsw_port *port;
int i, ret = 0;
- port = am65_common_get_port(common, 1);
-
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
+ netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
ret = devm_request_irq(dev, tx_chn->irq,
@@ -1932,16 +2006,27 @@ err:
return ret;
}
-static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->ndev)
+ unregister_netdev(port->ndev);
+ }
+}
+
+static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
struct am65_cpsw_port *port;
- int ret = 0;
+ int ret = 0, i;
- port = am65_common_get_port(common, 1);
- ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
+ ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
if (ret)
- goto err;
+ return ret;
ret = devm_request_irq(dev, common->rx_chns.irq,
am65_cpsw_nuss_rx_irq,
@@ -1949,17 +2034,31 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
if (ret) {
dev_err(dev, "failure requesting rx irq %u, %d\n",
common->rx_chns.irq, ret);
- goto err;
+ return ret;
+ }
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+
+ if (!port->ndev)
+ continue;
+
+ ret = register_netdev(port->ndev);
+ if (ret) {
+ dev_err(dev, "error registering slave net device%i %d\n",
+ i, ret);
+ goto err_cleanup_ndev;
+ }
}
- ret = register_netdev(port->ndev);
- if (ret)
- dev_err(dev, "error registering slave net device %d\n", ret);
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
-err:
+ return 0;
+
+err_cleanup_ndev:
+ am65_cpsw_nuss_cleanup_ndev(common);
return ret;
}
@@ -1972,19 +2071,7 @@ int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
if (ret)
return ret;
- return am65_cpsw_nuss_ndev_add_napi_2g(common);
-}
-
-static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
-{
- struct am65_cpsw_port *port;
- int i;
-
- for (i = 0; i < common->port_num; i++) {
- port = &common->ports[i];
- if (port->ndev)
- unregister_netdev(port->ndev);
- }
+ return am65_cpsw_nuss_ndev_add_tx_napi(common);
}
struct am65_cpsw_soc_pdata {
@@ -2005,10 +2092,14 @@ static const struct soc_device_attribute am65_cpsw_socinfo[] = {
static const struct am65_cpsw_pdata am65x_sr1_0 = {
.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
static const struct am65_cpsw_pdata j721e_pdata = {
.quirks = 0,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2068,9 +2159,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return -ENOENT;
of_node_put(node);
- if (common->port_num != 1)
- return -EOPNOTSUPP;
-
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
@@ -2089,13 +2177,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return -ENOMEM;
clk = devm_clk_get(dev, "fck");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "error getting fck clock %d\n", ret);
- return ret;
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
@@ -2145,7 +2228,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
ale_params.ale_ports = common->port_num + 1;
ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
- ale_params.dev_id = "am65x-cpsw2g";
+ ale_params.dev_id = common->pdata.ale_dev_id;
ale_params.bus_freq = common->bus_freq;
common->ale = cpsw_ale_create(&ale_params);
@@ -2165,11 +2248,11 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
dev_set_drvdata(dev, common);
- ret = am65_cpsw_nuss_init_ndev_2g(common);
+ ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
goto err_of_clear;
- ret = am65_cpsw_nuss_ndev_reg_2g(common);
+ ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
goto err_of_clear;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 993e1d4d3222..02aed4c0ceba 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -59,6 +60,7 @@ struct am65_cpsw_tx_chn {
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_tx_channel *tx_chn;
+ spinlock_t lock; /* protect TX rings in multi-port mode */
int irq;
u32 id;
u32 descs_num;
@@ -77,6 +79,8 @@ struct am65_cpsw_rx_chn {
struct am65_cpsw_pdata {
u32 quirks;
+ enum k3_ring_mode fdqring_mode;
+ const char *ale_dev_id;
};
struct am65_cpsw_common {
@@ -91,6 +95,7 @@ struct am65_cpsw_common {
struct am65_cpsw_host host;
struct am65_cpsw_port *ports;
u32 disabled_ports_mask;
+ struct net_device *dma_ndev;
int usage_count; /* number of opened ports */
struct cpsw_ale *ale;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index a6a455c32628..cdc308a2aa3e 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -634,8 +634,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
return 0;
}
-static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
- u16 vid, int port_mask)
+static void cpsw_ale_vlan_del_modify_int(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
{
int reg_mcast, unreg_mcast;
int members, untag;
@@ -644,6 +644,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
ALE_ENT_VID_MEMBER_LIST);
members &= ~port_mask;
if (!members) {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
return;
}
@@ -673,7 +674,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
ALE_ENT_VID_MEMBER_LIST, members);
}
-int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
@@ -684,11 +685,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask) {
- cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
- } else {
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
+ cpsw_ale_write(ale, idx, ale_entry);
+
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int members, idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ /* If !port_mask, force-remove the VLAN (legacy behaviour).
+ * Otherwise check whether other ports are still VLAN members:
+ * if none remain, remove the VLAN entry; if some do, the same VLAN
+ * was added to more than one port in multi-port mode, so only drop
+ * the port_mask ports from the VLAN ALE entry, excluding the host port.
+ */
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry, ALE_ENT_VID_MEMBER_LIST);
+ members &= ~port_mask;
+
+ if (!port_mask || !members) {
+ /* last port or force remove - remove VLAN */
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ } else {
+ port_mask &= ~ALE_PORT_HOST;
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
}
cpsw_ale_write(ale, idx, ale_entry);
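[The reworked cpsw_ale_del_vlan() treats the VLAN entry as a port bitmask: it clears the departing ports, keeps the entry while members remain, and frees it once the mask is empty or on a forced !port_mask delete. A toy standalone model of that bookkeeping; the structure and names are illustrative:]

#include <stdio.h>

#define PORT_HOST 0x1

struct vlan_entry {
	unsigned int members;	/* bitmask of member ports */
	int free;
};

static void del_vlan(struct vlan_entry *e, unsigned int port_mask)
{
	unsigned int members = e->members & ~port_mask;

	if (!port_mask || !members) {
		e->free = 1;	/* last member or forced delete */
		return;
	}
	/* keep the host port's membership, drop only the slave ports */
	e->members &= ~(port_mask & ~PORT_HOST);
}

int main(void)
{
	struct vlan_entry e = { .members = PORT_HOST | 0x2 | 0x4 };

	del_vlan(&e, 0x2 | PORT_HOST);	/* port 1 leaves */
	printf("members=0x%x free=%d\n", e.members, e.free);
	del_vlan(&e, 0x4 | PORT_HOST);	/* last slave leaves */
	printf("members=0x%x free=%d\n", e.members, e.free);
	return 0;
}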
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 5e4a69662c5f..13fe47687fde 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -134,6 +134,7 @@ static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int untag_mask, int reg_mcast, int unreg_mcast);
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 985a929bb957..29747da5c514 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -227,7 +227,7 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
else
port_mask = BIT(priv->emac_port);
- ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ ret = cpsw_ale_vlan_del_modify(cpsw->ale, vid, port_mask);
if (ret != 0)
return ret;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 267c080ee084..0b2ce4bdc2c3 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -186,6 +186,7 @@ static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);
+static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
@@ -201,9 +202,11 @@ static void tlan_phy_finish_auto_neg(struct net_device *);
static int tlan_phy_dp83840a_check(struct net_device *);
*/
-static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static bool __tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
+static void __tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_ee_send_start(u16);
@@ -242,23 +245,20 @@ static u32
tlan_handle_rx_eoc
};
-static inline void
+static void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
- if (!in_irq())
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
if (priv->timer.function != NULL &&
priv->timer_type != TLAN_TIMER_ACTIVITY) {
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
return;
}
priv->timer.function = tlan_timer;
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->timer_set_at = jiffies;
priv->timer_type = type;
@@ -1703,22 +1703,22 @@ static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
dev->name, (unsigned) net_sts);
}
if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
- tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
- tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
if (!(tlphy_sts & TLAN_TS_POLOK) &&
!(tlphy_ctl & TLAN_TC_SWAPOL)) {
tlphy_ctl |= TLAN_TC_SWAPOL;
- tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
- tlphy_ctl);
+ __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
} else if ((tlphy_sts & TLAN_TS_POLOK) &&
(tlphy_ctl & TLAN_TC_SWAPOL)) {
tlphy_ctl &= ~TLAN_TC_SWAPOL;
- tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
- tlphy_ctl);
+ __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
}
if (debug)
- tlan_phy_print(dev);
+ __tlan_phy_print(dev);
}
}
@@ -2379,7 +2379,7 @@ ThunderLAN driver PHY layer routines
/*********************************************************************
- * tlan_phy_print
+ * __tlan_phy_print
*
* Returns:
* Nothing
@@ -2391,11 +2391,13 @@ ThunderLAN driver PHY layer routines
*
********************************************************************/
-static void tlan_phy_print(struct net_device *dev)
+static void __tlan_phy_print(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
+ lockdep_assert_held(&priv->lock);
+
phy = priv->phy[priv->phy_num];
if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
@@ -2404,10 +2406,10 @@ static void tlan_phy_print(struct net_device *dev)
netdev_info(dev, "PHY 0x%02x\n", phy);
pr_info(" Off. +0 +1 +2 +3\n");
for (i = 0; i < 0x20; i += 4) {
- tlan_mii_read_reg(dev, phy, i, &data0);
- tlan_mii_read_reg(dev, phy, i + 1, &data1);
- tlan_mii_read_reg(dev, phy, i + 2, &data2);
- tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ __tlan_mii_read_reg(dev, phy, i, &data0);
+ __tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ __tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ __tlan_mii_read_reg(dev, phy, i + 3, &data3);
pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
i, data0, data1, data2, data3);
}
@@ -2417,7 +2419,15 @@ static void tlan_phy_print(struct net_device *dev)
}
+static void tlan_phy_print(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_phy_print(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/*********************************************************************
@@ -2795,7 +2805,7 @@ these routines are based on the information in chap. 2 of the
/***************************************************************
- * tlan_mii_read_reg
+ * __tlan_mii_read_reg
*
* Returns:
* false if ack received ok
@@ -2819,7 +2829,7 @@ these routines are based on the information in chap. 2 of the
**************************************************************/
static bool
-tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
+__tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
@@ -2827,15 +2837,13 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
bool err;
int minten;
struct tlan_priv *priv = netdev_priv(dev);
- unsigned long flags = 0;
+
+ lockdep_assert_held(&priv->lock);
err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- if (!in_irq())
- spin_lock_irqsave(&priv->lock, flags);
-
tlan_mii_sync(dev->base_addr);
minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
@@ -2881,15 +2889,19 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
*val = tmp;
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
-
return err;
-
}
+static void tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg,
+ u16 *val)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_mii_read_reg(dev, phy, reg, val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/***************************************************************
* tlan_mii_send_data
@@ -2971,7 +2983,7 @@ static void tlan_mii_sync(u16 base_port)
/***************************************************************
- * tlan_mii_write_reg
+ * __tlan_mii_write_reg
*
* Returns:
* Nothing
@@ -2991,19 +3003,17 @@ static void tlan_mii_sync(u16 base_port)
**************************************************************/
static void
-tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+__tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
- unsigned long flags = 0;
struct tlan_priv *priv = netdev_priv(dev);
+ lockdep_assert_held(&priv->lock);
+
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- if (!in_irq())
- spin_lock_irqsave(&priv->lock, flags);
-
tlan_mii_sync(dev->base_addr);
minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
@@ -3024,12 +3034,18 @@ tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
if (minten)
tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
-
}
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_mii_write_reg(dev, phy, reg, val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/*****************************************************************************
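[The tlan rework above replaces in_irq()-conditional locking with the standard kernel pattern: a double-underscore worker that asserts the lock is held, plus a thin wrapper that takes the lock and calls it. A toy standalone version of the pattern, with a pthread mutex in place of the spinlock and an assert in place of lockdep_assert_held() (build with -lpthread):]

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct dev_priv {
	pthread_mutex_t lock;
	int locked;		/* poor man's lockdep */
	int reg;
};

/* __read_reg: caller must hold priv->lock */
static int __read_reg(struct dev_priv *priv)
{
	assert(priv->locked);	/* lockdep_assert_held(&priv->lock) */
	return priv->reg;
}

/* read_reg: takes the lock, then calls the worker */
static int read_reg(struct dev_priv *priv)
{
	int val;

	pthread_mutex_lock(&priv->lock);
	priv->locked = 1;
	val = __read_reg(priv);
	priv->locked = 0;
	pthread_mutex_unlock(&priv->lock);
	return val;
}

int main(void)
{
	struct dev_priv priv = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .reg = 7,
	};

	printf("reg = %d\n", read_reg(&priv));
	return 0;
}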
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index d0d0d4fe9d40..3b2137d1f4c6 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || COMPILE_TEST
select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index f34c7903ff52..a03c3ca1b28d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -378,6 +378,7 @@ struct axidma_bd {
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
+ * @mii_clk_div: MII bus clock divider value
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
@@ -419,11 +420,15 @@ struct axienet_local {
struct phylink *phylink;
struct phylink_config phylink_config;
+ /* Reference to PCS/PMA PHY if used */
+ struct mdio_device *pcs_phy;
+
/* Clock for AXI bus */
struct clk *clk;
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
+ u8 mii_clk_div; /* MII bus clock divider value */
/* IO registers, dma functions and IRQs */
resource_size_t regs_start;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9aafd3ecdaa4..6fea980acf64 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1049,20 +1049,13 @@ static int axienet_open(struct net_device *ndev)
dev_dbg(&ndev->dev, "axienet_open()\n");
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
ret = axienet_device_reset(ndev);
- if (ret == 0)
- ret = axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
- if (ret < 0)
- return ret;
ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
if (ret) {
@@ -1156,9 +1149,7 @@ static int axienet_stop(struct net_device *ndev)
/* Do a reset to ensure DMA is really stopped */
mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
__axienet_device_reset(lp);
- axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
cancel_work_sync(&lp->dma_err_task);
@@ -1517,10 +1508,27 @@ static void axienet_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
phylink_set(mask, Pause);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 1000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ break;
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10baseT_Full);
+ default:
+ break;
+ }
bitmap_and(supported, supported, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
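The axienet_validate() rework restricts the advertised link modes per PHY interface, using fallthrough so that gigabit-capable interfaces other than 1000BASE-X also pick up the 10/100 modes. A hedged sketch of the same mask-building switch, with made-up mode bits in place of the ethtool bitmap helpers:

#include <stdio.h>

enum { MODE_10F = 1, MODE_100F = 2, MODE_1000T = 4, MODE_1000X = 8 };
enum iface { IF_1000BASEX, IF_SGMII, IF_MII };

static unsigned int supported_modes(enum iface iface)
{
	unsigned int mask = 0;

	switch (iface) {
	case IF_1000BASEX:
	case IF_SGMII:
		mask |= MODE_1000X | MODE_1000T;
		if (iface == IF_1000BASEX)
			break;
		/* fall through: SGMII also runs at 100/10 */
	case IF_MII:
		mask |= MODE_100F | MODE_10F;
		break;
	}
	return mask;
}

int main(void)
{
	printf("sgmii mask: %#x\n", supported_modes(IF_SGMII));
	return 0;
}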
@@ -1533,38 +1541,46 @@ static void axienet_mac_pcs_get_state(struct phylink_config *config,
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
- u32 emmc_reg, fcc_reg;
-
- state->interface = lp->phy_mode;
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- if (emmc_reg & XAE_EMMC_LINKSPD_1000)
- state->speed = SPEED_1000;
- else if (emmc_reg & XAE_EMMC_LINKSPD_100)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
-
- state->pause = 0;
- fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (fcc_reg & XAE_FCC_FCTX_MASK)
- state->pause |= MLO_PAUSE_TX;
- if (fcc_reg & XAE_FCC_FCRX_MASK)
- state->pause |= MLO_PAUSE_RX;
-
- state->an_complete = 0;
- state->duplex = 1;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
+ break;
+ default:
+ break;
+ }
}
static void axienet_mac_an_restart(struct phylink_config *config)
{
- /* Unsupported, do nothing */
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- /* nothing meaningful to do */
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
+ state->interface,
+ state->advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n",
+ ret);
+ break;
+
+ default:
+ break;
+ }
}
static void axienet_mac_link_down(struct phylink_config *config,
@@ -1644,16 +1660,12 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
__axienet_device_reset(lp);
- axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
for (i = 0; i < lp->tx_bd_num; i++) {
@@ -1999,6 +2011,20 @@ static int axienet_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"error registering MDIO bus: %d\n", ret);
}
+ if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
+ if (!lp->phy_node) {
+ dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ ret = -EINVAL;
+ goto free_netdev;
+ }
+ lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ if (!lp->pcs_phy) {
+ ret = -EPROBE_DEFER;
+ goto free_netdev;
+ }
+ lp->phylink_config.pcs_poll = true;
+ }
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
@@ -2036,6 +2062,9 @@ static int axienet_remove(struct platform_device *pdev)
if (lp->phylink)
phylink_destroy(lp->phylink);
+ if (lp->pcs_phy)
+ put_device(&lp->pcs_phy->dev);
+
axienet_mdio_teardown(lp);
clk_disable_unprepare(lp->clk);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 435ed308d990..9c014cee34b2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -30,6 +30,23 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
1, 20000);
}
+/* Enable the MDIO MDC. Called prior to a read/write operation */
+static void axienet_mdio_mdc_enable(struct axienet_local *lp)
+{
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ ((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK));
+}
+
+/* Disable the MDIO MDC. Called after a read/write operation */
+static void axienet_mdio_mdc_disable(struct axienet_local *lp)
+{
+ u32 mc_reg;
+
+ mc_reg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ (mc_reg & ~XAE_MDIO_MC_MDIOEN_MASK));
+}
+
/**
* axienet_mdio_read - MDIO interface read function
* @bus: Pointer to mii bus structure
@@ -48,9 +65,13 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
int ret;
struct axienet_local *lp = bus->priv;
+ axienet_mdio_mdc_enable(lp);
+
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
@@ -61,14 +82,17 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
XAE_MDIO_MCR_OP_READ_MASK));
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF;
dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
+ axienet_mdio_mdc_disable(lp);
return rc;
}
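With the MDC gated off between accesses, every exit path of a read or write must re-disable it, as the repeated disable calls above show. A small userspace sketch of the enable/operate/disable bracket using a single goto exit (names are placeholders, not the driver's API):

#include <errno.h>
#include <stdio.h>

static int mdc_enabled;

static void mdc_enable(void)  { mdc_enabled = 1; }
static void mdc_disable(void) { mdc_enabled = 0; }

static int wait_ready(void)   { return mdc_enabled ? 0 : -ETIMEDOUT; }

static int mdio_read(int reg)
{
	int ret;

	mdc_enable();
	ret = wait_ready();
	if (ret < 0)
		goto out;
	ret = reg | 0x100;	/* placeholder for the register read */
out:
	mdc_disable();		/* disabled on every exit path */
	return ret;
}

int main(void)
{
	printf("%#x\n", mdio_read(2));
	return 0;
}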
@@ -94,9 +118,13 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
phy_id, reg, val);
+ axienet_mdio_mdc_enable(lp);
+
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
@@ -108,8 +136,11 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
XAE_MDIO_MCR_OP_WRITE_MASK));
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
+ axienet_mdio_mdc_disable(lp);
return 0;
}
@@ -124,7 +155,9 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
**/
int axienet_mdio_enable(struct axienet_local *lp)
{
- u32 clk_div, host_clock;
+ u32 host_clock;
+
+ lp->mii_clk_div = 0;
if (lp->clk) {
host_clock = clk_get_rate(lp->clk);
@@ -176,19 +209,19 @@ int axienet_mdio_enable(struct axienet_local *lp)
* "clock-frequency" from the CPU
*/
- clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+ lp->mii_clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
* 1 to the clock divisor or we will surely be above 2.5 MHz
*/
if (host_clock % (MAX_MDIO_FREQ * 2))
- clk_div++;
+ lp->mii_clk_div++;
netdev_dbg(lp->ndev,
"Setting MDIO clock divisor to %u/%u Hz host clock.\n",
- clk_div, host_clock);
+ lp->mii_clk_div, host_clock);
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK);
return axienet_mdio_wait_until_ready(lp);
}
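The divisor keeps MDC = host_clock / (2 * (div + 1)) at or below 2.5 MHz, rounding the division up whenever there is a remainder. A standalone sketch of the same computation, reusing the MAX_MDIO_FREQ value from the driver:

#include <stdio.h>

#define MAX_MDIO_FREQ	2500000u

static unsigned int mdio_clk_div(unsigned int host_clock)
{
	unsigned int div = host_clock / (MAX_MDIO_FREQ * 2) - 1;

	if (host_clock % (MAX_MDIO_FREQ * 2))
		div++;	/* round up so we stay at or below 2.5 MHz */
	return div;
}

int main(void)
{
	/* 125 MHz host clock -> div = 24 -> MDC = 2.5 MHz exactly */
	printf("div = %u\n", mdio_clk_div(125000000));
	return 0;
}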
@@ -211,8 +244,8 @@ void axienet_mdio_disable(struct axienet_local *lp)
* Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
* mdiobus_alloc (to allocate memory for mii bus structure) fails.
*
- * Sets up the MDIO interface by initializing the MDIO clock and enabling the
- * MDIO interface in hardware. Register the MDIO interface.
+ * Sets up the MDIO interface by initializing the MDIO clock.
+ * Register the MDIO interface.
**/
int axienet_mdio_setup(struct axienet_local *lp)
{
@@ -246,6 +279,7 @@ int axienet_mdio_setup(struct axienet_local *lp)
lp->mii_bus = NULL;
return ret;
}
+ axienet_mdio_mdc_disable(lp);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0c26f5bcc523..008b9a40faad 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -97,7 +97,7 @@
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
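Casting a pointer through u32 truncates the upper half of a 64-bit address, so the alignment math only works by luck; uintptr_t is wide enough by definition. A tiny sketch of the corrected macro in isolation:

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT	4
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)(adr))) % ALIGNMENT)

int main(void)
{
	char buf[8];

	printf("pad to next %d-byte boundary: %zu\n",
	       ALIGNMENT, (size_t)BUFFER_ALIGN(buf + 1));
	return 0;
}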
@@ -338,7 +338,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* if it is configured in HW
*/
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((uintptr_t __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
@@ -399,8 +399,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* will correct on subsequent calls
*/
if (drvdata->rx_ping_pong != 0)
- addr = (void __iomem __force *)((u32 __force)addr ^
- XEL_BUFFER_OFFSET);
+ addr = (void __iomem __force *)
+ ((uintptr_t __force)addr ^
+ XEL_BUFFER_OFFSET);
else
return 0; /* No data was available */
@@ -518,6 +519,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
+ * @txqueue: Unused
*
* This function is called when Tx time out occurs for Emaclite device.
*/
@@ -1191,9 +1193,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
+ "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
(unsigned int __force)ndev->mem_start,
- (unsigned int __force)lp->base_addr, ndev->irq);
+ (unsigned long __force)lp->base_addr, ndev->irq);
return 0;
error:
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index cc9ac572423e..e9b9614639cd 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -22,10 +22,6 @@
#include <linux/bitrev.h>
#include <linux/pci.h>
-#ifndef lint
-static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
-#endif
-
/*
* PCM active state
*/
diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c
index 15c503f43727..2f5f5f26bb43 100644
--- a/drivers/net/fddi/skfp/ecm.c
+++ b/drivers/net/fddi/skfp/ecm.c
@@ -40,10 +40,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
@@ -147,10 +143,11 @@ static void ecm_fsm(struct s_smc *smc, int cmd)
/* For AIX event notification: */
/* Is a disconnect command remotely issued ? */
if (cmd == EC_DISCONNECT &&
- smc->mib.fddiSMTRemoteDisconnectFlag == TRUE)
+ smc->mib.fddiSMTRemoteDisconnectFlag == TRUE) {
AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
smt_get_error_word(smc) );
+ }
/*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/
if (cmd == EC_CONNECT) {
diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c
index afd5ca39f43b..35110c0c00a0 100644
--- a/drivers/net/fddi/skfp/ess.c
+++ b/drivers/net/fddi/skfp/ess.c
@@ -40,7 +40,6 @@
#ifdef ESS
#ifndef lint
-static const char ID_sccs[] = "@(#)ess.c 1.10 96/02/23 (C) SK" ;
#define LINT_USE(x)
#else
#define LINT_USE(x) (x)=(x)
diff --git a/drivers/net/fddi/skfp/hwt.c b/drivers/net/fddi/skfp/hwt.c
index 32804ed049cd..5577b8e14b73 100644
--- a/drivers/net/fddi/skfp/hwt.c
+++ b/drivers/net/fddi/skfp/hwt.c
@@ -27,10 +27,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)hwt.c 1.13 97/04/23 (C) SK " ;
-#endif
-
/*
* Prototypes of local functions.
*/
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 554cde8d6073..90e8df6d9a88 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -45,10 +45,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ;
-#endif
-
#ifdef FDDI_MIB
extern int snmp_fddi_trap(
#ifdef ANSIC
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 14f10b4cab0f..563fb7f0b327 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -24,10 +24,6 @@
#ifndef SLIM_SMT
-#ifndef lint
-static const char ID_sccs[] = "@(#)pmf.c 1.37 97/08/04 (C) SK " ;
-#endif
-
static int smt_authorize(struct s_smc *smc, struct smt_header *sm);
static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm);
static const struct s_p_tab* smt_get_ptab(u_short para);
diff --git a/drivers/net/fddi/skfp/queue.c b/drivers/net/fddi/skfp/queue.c
index ba022f723bd7..abe155ad777f 100644
--- a/drivers/net/fddi/skfp/queue.c
+++ b/drivers/net/fddi/skfp/queue.c
@@ -18,10 +18,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ;
-#endif
-
#define PRINTF(a,b,c)
/*
diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c
index c0e62c25332c..37a89675dbeb 100644
--- a/drivers/net/fddi/skfp/rmt.c
+++ b/drivers/net/fddi/skfp/rmt.c
@@ -45,10 +45,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
diff --git a/drivers/net/fddi/skfp/smtdef.c b/drivers/net/fddi/skfp/smtdef.c
index 0bebde3c6cb9..99cc9a549bd7 100644
--- a/drivers/net/fddi/skfp/smtdef.c
+++ b/drivers/net/fddi/skfp/smtdef.c
@@ -22,10 +22,6 @@
#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata"
#endif
-#ifndef lint
-static const char ID_sccs[] = "@(#)smtdef.c 2.53 99/08/11 (C) SK " ;
-#endif
-
/*
* defaults
*/
diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c
index 01f6c75cbea8..c9898c83fe30 100644
--- a/drivers/net/fddi/skfp/smtinit.c
+++ b/drivers/net/fddi/skfp/smtinit.c
@@ -19,10 +19,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smtinit.c 1.15 97/05/06 (C) SK " ;
-#endif
-
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
/* define global debug variable */
diff --git a/drivers/net/fddi/skfp/smttimer.c b/drivers/net/fddi/skfp/smttimer.c
index 9d549bb14f07..5f3e5d7bf415 100644
--- a/drivers/net/fddi/skfp/smttimer.c
+++ b/drivers/net/fddi/skfp/smttimer.c
@@ -18,10 +18,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smttimer.c 2.4 97/08/04 (C) SK " ;
-#endif
-
static void timer_done(struct s_smc *smc, int restart);
void smt_timer_init(struct s_smc *smc)
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f98d060b0f5b..4cad68c3f49b 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -26,11 +26,6 @@
#ifndef SLIM_SMT
#ifndef BOOT
-#ifndef lint
-static const char ID_sccs[] = "@(#)srf.c 1.18 97/08/04 (C) SK " ;
-#endif
-
-
/*
* function declarations
*/
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 1426bfc009bc..ef9b5ea9073b 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1137,7 +1137,7 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_open = geneve_open,
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
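dev_get_tstats64() is the generic netdev helper that sums the per-CPU tunnel stats these drivers already keep, so they can drop ip_tunnel_get_stats64. A rough userspace sketch of the aggregation it performs (the real helper additionally uses u64_stats sequence counters for torn-read protection, omitted here; all names below are illustrative):

#include <stdio.h>

#define NR_CPUS 4

struct pcpu_stats { unsigned long long rx_packets, rx_bytes; };

static struct pcpu_stats stats[NR_CPUS] = {
	{ 10, 1000 }, { 7, 700 }, { 0, 0 }, { 3, 300 },
};

int main(void)
{
	unsigned long long pkts = 0, bytes = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {	/* for_each_possible_cpu() */
		pkts  += stats[cpu].rx_packets;
		bytes += stats[cpu].rx_bytes;
	}
	printf("rx: %llu packets, %llu bytes\n", pkts, bytes);
	return 0;
}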
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index dc668ed280b9..4c04e271f184 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -607,7 +607,7 @@ static const struct net_device_ops gtp_netdev_ops = {
.ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static void gtp_link_setup(struct net_device *dev)
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index e7413a643929..9e0058154ac3 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -597,7 +597,7 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_DRIVERNAME:
if (s->ops && s->ops->drvname) {
- strncpy(bi.data.drivername, s->ops->drvname,
+ strlcpy(bi.data.drivername, s->ops->drvname,
sizeof(bi.data.drivername));
break;
}
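strncpy() leaves the destination unterminated when the source fills the buffer, which is what the strlcpy() conversion avoids. A self-contained sketch contrasting the two (my_strlcpy is a local stand-in, since strlcpy is not in every libc):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* always terminated */
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "hdlcdrv", sizeof(a));	/* a is NOT terminated */
	my_strlcpy(b, "hdlcdrv", sizeof(b));	/* b == "hdl" */
	printf("%.4s / %s\n", a, b);
	return 0;
}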
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 4eb64709d44c..3a2824f24caa 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -316,6 +316,7 @@ struct cas_control {
* struct ca8210_test - ca8210 test interface structure
* @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device
* @up_fifo: fifo for upstream messages
+ * @readq: read wait queue
*
* This structure stores all the data pertaining to the debug interface
*/
@@ -346,12 +347,12 @@ struct ca8210_test {
* @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms
* @sync_down: counts number of downstream synchronous commands
* @sync_up: counts number of upstream synchronous commands
- * @spi_transfer_complete completion object for a single spi_transfer
- * @sync_exchange_complete completion object for a complete synchronous API
- * exchange
- * @promiscuous whether the ca8210 is in promiscuous mode or not
+ * @spi_transfer_complete: completion object for a single spi_transfer
+ * @sync_exchange_complete: completion object for a complete synchronous API
+ * exchange
+ * @promiscuous: whether the ca8210 is in promiscuous mode or not
* @retries: records how many times the current pending spi
- * transfer has been retried
+ * transfer has been retried
*/
struct ca8210_priv {
struct spi_device *spi;
@@ -420,8 +421,8 @@ struct fulladdr {
/**
* union macaddr: generic MAC address container
- * @short_addr: 16-bit short address
- * @ieee_address: 64-bit extended address as LE byte array
+ * @short_address: 16-bit short address
+ * @ieee_address: 64-bit extended address as LE byte array
*
*/
union macaddr {
@@ -714,7 +715,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
/**
* ca8210_rx_done() - Calls various message dispatches responding to a received
* command
- * @arg: Pointer to the cas_control object for the relevant spi transfer
+ * @cas_ctl: Pointer to the cas_control object for the relevant spi transfer
*
* Presents a received SAP command from the ca8210 to the Cascoda EVBME, test
* interface and network driver.
@@ -1277,7 +1278,6 @@ static u8 tdme_channelinit(u8 channel, void *device_ref)
* @pib_attribute: Attribute Number
* @pib_attribute_length: Attribute length
* @pib_attribute_value: Pointer to Attribute Value
- * @device_ref: Nondescript pointer to target device
*
* Return: 802.15.4 status code of checks
*/
@@ -3046,7 +3046,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
/**
* ca8210_remove() - Shut down a ca8210 upon being disconnected
- * @priv: Pointer to private data structure
+ * @spi_device: Pointer to spi device data structure
*
* Return: 0 or linux error code
*/
@@ -3096,7 +3096,7 @@ static int ca8210_remove(struct spi_device *spi_device)
/**
* ca8210_probe() - Set up a connected ca8210 upon being detected by the system
- * @priv: Pointer to private data structure
+ * @spi_device: Pointer to spi device data structure
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 7fe306e76281..fa63d4dee0ba 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -187,8 +187,7 @@ static const struct net_device_ops ifb_netdev_ops = {
};
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
- NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 6bfac1efe037..55151960a698 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -21,6 +21,7 @@
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
+#include "ipa_version.h"
/**
* DOC: The IPA Generic Software Interface
@@ -108,62 +109,6 @@ struct gsi_event {
u8 chid;
};
-/* Hardware values from the error log register error code field */
-enum gsi_err_code {
- GSI_INVALID_TRE_ERR = 0x1,
- GSI_OUT_OF_BUFFERS_ERR = 0x2,
- GSI_OUT_OF_RESOURCES_ERR = 0x3,
- GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
- GSI_EVT_RING_EMPTY_ERR = 0x5,
- GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
- GSI_HWO_1_ERR = 0x8,
-};
-
-/* Hardware values from the error log register error type field */
-enum gsi_err_type {
- GSI_ERR_TYPE_GLOB = 0x1,
- GSI_ERR_TYPE_CHAN = 0x2,
- GSI_ERR_TYPE_EVT = 0x3,
-};
-
-/* Hardware values used when programming an event ring */
-enum gsi_evt_chtype {
- GSI_EVT_CHTYPE_MHI_EV = 0x0,
- GSI_EVT_CHTYPE_XHCI_EV = 0x1,
- GSI_EVT_CHTYPE_GPI_EV = 0x2,
- GSI_EVT_CHTYPE_XDCI_EV = 0x3,
-};
-
-/* Hardware values used when programming a channel */
-enum gsi_channel_protocol {
- GSI_CHANNEL_PROTOCOL_MHI = 0x0,
- GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
- GSI_CHANNEL_PROTOCOL_GPI = 0x2,
- GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
-};
-
-/* Hardware values representing an event ring immediate command opcode */
-enum gsi_evt_cmd_opcode {
- GSI_EVT_ALLOCATE = 0x0,
- GSI_EVT_RESET = 0x9,
- GSI_EVT_DE_ALLOC = 0xa,
-};
-
-/* Hardware values representing a generic immediate command opcode */
-enum gsi_generic_cmd_opcode {
- GSI_GENERIC_HALT_CHANNEL = 0x1,
- GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
-};
-
-/* Hardware values representing a channel immediate command opcode */
-enum gsi_ch_cmd_opcode {
- GSI_CH_ALLOCATE = 0x0,
- GSI_CH_START = 0x1,
- GSI_CH_STOP = 0x2,
- GSI_CH_RESET = 0x9,
- GSI_CH_DE_ALLOC = 0xa,
-};
-
/** gsi_channel_scratch_gpi - GPI protocol scratch register
* @max_outstanding_tre:
* Defines the maximum number of TREs allowed in a single transaction
@@ -229,21 +174,70 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
return channel - &channel->gsi->channel[0];
}
+/* Update the GSI IRQ type register with the cached value */
+static void gsi_irq_type_update(struct gsi *gsi, u32 val)
+{
+ gsi->type_enabled_bitmap = val;
+ iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+}
+
+static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+}
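The new helpers keep the enabled-types bitmap cached in struct gsi, so enable/disable become read-modify-write operations on the cache followed by a single register write. A userspace sketch of that cached-mask pattern, with a plain variable standing in for the MMIO register (names are illustrative):

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int hw_irq_msk;		/* stands in for the MMIO register */

struct irq_cache { unsigned int type_enabled_bitmap; };

static void irq_type_update(struct irq_cache *c, unsigned int val)
{
	c->type_enabled_bitmap = val;
	hw_irq_msk = val;		/* iowrite32() in the real driver */
}

static void irq_type_enable(struct irq_cache *c, unsigned int id)
{
	irq_type_update(c, c->type_enabled_bitmap | BIT(id));
}

static void irq_type_disable(struct irq_cache *c, unsigned int id)
{
	irq_type_update(c, c->type_enabled_bitmap & ~BIT(id));
}

int main(void)
{
	struct irq_cache c = { 0 };

	irq_type_enable(&c, 2);
	irq_type_enable(&c, 6);
	irq_type_disable(&c, 2);
	printf("register mirror: %#x\n", hw_irq_msk);
	return 0;
}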
+
+/* Turn off all GSI interrupts initially */
+static void gsi_irq_setup(struct gsi *gsi)
+{
+ /* Disable all interrupt types */
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear all type-specific interrupt masks */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Turn off all GSI interrupts when we're all done */
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
+ bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
- gsi->event_enable_bitmap |= BIT(evt_ring_id);
- val = gsi->event_enable_bitmap;
+ gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+ val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ /* Enable the interrupt type if this is the first channel enabled */
+ if (enable_ieob)
+ gsi_irq_type_enable(gsi, GSI_IEOB);
}
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val;
- gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
- val = gsi->event_enable_bitmap;
+ gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+
+ /* Disable the interrupt type if this was the last enabled channel */
+ if (!gsi->ieob_enabled_bitmap)
+ gsi_irq_type_disable(gsi, GSI_IEOB);
+
+ val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
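IEOB is a shared interrupt type: it must stay enabled while any event ring uses it, so the code above flips it on for the first user and off after the last. A minimal sketch of that first-user/last-user gating:

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int ieob_bitmap;
static int type_enabled;

static void ieob_enable(unsigned int ring)
{
	int first = !ieob_bitmap;

	ieob_bitmap |= BIT(ring);
	if (first)
		type_enabled = 1;	/* gsi_irq_type_enable() */
}

static void ieob_disable(unsigned int ring)
{
	ieob_bitmap &= ~BIT(ring);
	if (!ieob_bitmap)
		type_enabled = 0;	/* gsi_irq_type_disable() */
}

int main(void)
{
	ieob_enable(0);
	ieob_enable(5);
	ieob_disable(0);
	printf("type still enabled: %d\n", type_enabled);
	ieob_disable(5);
	printf("type now enabled: %d\n", type_enabled);
	return 0;
}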
@@ -252,38 +246,32 @@ static void gsi_irq_enable(struct gsi *gsi)
{
u32 val;
- /* We don't use inter-EE channel or event interrupts */
- val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
- val &= ~INTER_EE_CH_CTRL_FMASK;
- val &= ~INTER_EE_EV_CTRL_FMASK;
- iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
-
- val = GENMASK(gsi->channel_count - 1, 0);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-
- val = GENMASK(gsi->evt_ring_count - 1, 0);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-
- /* Each IEOB interrupt is enabled (later) as needed by channels */
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-
- val = GSI_CNTXT_GLOB_IRQ_ALL;
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ /* Global interrupts include hardware error reports. Enable
+ * that so we can at least report the error should it occur.
+ */
+ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
- /* Never enable GSI_BREAK_POINT */
- val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
+ /* General GSI interrupts are reported to all EEs; if they occur
+ * they are unrecoverable (without reset). A breakpoint interrupt
+ * also exists, but we don't support that. We want to be notified
+ * of errors so we can report them, even if they can't be handled.
+ */
+ val = BIT(BUS_ERROR);
+ val |= BIT(CMD_FIFO_OVRFLOW);
+ val |= BIT(MCS_STACK_OVRFLOW);
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}
-/* Disable all GSI_interrupt types */
+/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}
/* Return the virtual address associated with a ring index */
@@ -337,13 +325,30 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
+ bool success;
u32 val;
+ /* We only perform one event ring command at a time, and event
+ * control interrupts should only occur when such a command
+ * is issued here. Only permit *this* event ring to trigger
+ * an interrupt, and only enable the event control IRQ type
+ * when we expect it to occur.
+ */
+ val = BIT(evt_ring_id);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+
+ /* Disable the interrupt again */
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ if (success)
+ return 0;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
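Commands now open a narrow interrupt window: only the targeted ring (or, in the next hunk, channel) is unmasked, and only while the command is outstanding. A hedged sketch of the window, with a mock mask register in place of the GSI MMIO writes:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int src_mask;	/* per-object interrupt mask register */

static bool issue_command(unsigned int id)
{
	/* pretend the command completes and fires source "id" */
	return src_mask & BIT(id);
}

static int ring_command(unsigned int ring_id)
{
	bool success;

	src_mask = BIT(ring_id);	/* only *this* ring may interrupt */
	success = issue_command(ring_id);
	src_mask = 0;			/* window closed again */

	return success ? 0 : -1;
}

int main(void)
{
	printf("command result: %d\n", ring_command(2));
	return 0;
}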
@@ -433,13 +438,29 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
+ bool success;
u32 val;
+ /* We only perform one channel command at a time, and channel
+ * control interrupts should only occur when such a command is
+ * issued here. So we only permit *this* channel to trigger
+ * an interrupt and only enable the channel control IRQ type
+ * when we expect it to occur.
+ */
+ val = BIT(channel_id);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+ success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ /* Disable the interrupt again */
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ if (success)
+ return 0;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
@@ -607,7 +628,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
u32 val;
- val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+ /* We program all event rings as GPI type/protocol */
+ val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
val |= EV_INTYPE_FMASK;
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
@@ -714,8 +736,8 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Arbitrarily pick TRE 0 as the first channel element to use */
channel->tre_ring.index = 0;
- /* We program all channels to use GPI protocol */
- val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+ /* We program all channels as GPI type/protocol */
+ val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
@@ -742,11 +764,12 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
- /* Enable the doorbell engine if requested */
- if (doorbell)
+ /* We enable the doorbell engine for IPA v3.5.1 */
+ if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
val |= USE_DB_ENG_FMASK;
- if (!channel->use_prefetch)
+ /* Starting with IPA v4.0 the command channel uses the escape buffer */
+ if (gsi->version != IPA_VERSION_3_5_1 && channel->command)
val |= USE_ESCAPE_BUF_ONLY_FMASK;
iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
@@ -829,8 +852,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
return ret;
}
-/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
+/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
@@ -838,10 +861,10 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
- if (legacy && !channel->toward_ipa)
+ if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
@@ -989,7 +1012,7 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
- if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
complete(&gsi->channel[channel_id].completion);
return;
@@ -1004,7 +1027,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
- if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ if (code == GSI_OUT_OF_RESOURCES) {
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
@@ -1034,8 +1057,8 @@ static void gsi_isr_glob_err(struct gsi *gsi)
iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
ee = u32_get_bits(val, ERR_EE_FMASK);
- which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
type = u32_get_bits(val, ERR_TYPE_FMASK);
+ which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
code = u32_get_bits(val, ERR_CODE_FMASK);
if (type == GSI_ERR_TYPE_CHAN)
@@ -1054,7 +1077,7 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
- if (result != GENERIC_EE_SUCCESS_FVAL)
+ if (result != GENERIC_EE_SUCCESS)
dev_err(gsi->dev, "global INT1 generic result %u\n", result);
complete(&gsi->completion);
@@ -1067,15 +1090,15 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
- if (val & ERROR_INT_FMASK)
+ if (val & BIT(ERROR_INT))
gsi_isr_glob_err(gsi);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
- val &= ~ERROR_INT_FMASK;
+ val &= ~BIT(ERROR_INT);
- if (val & GP_INT1_FMASK) {
- val ^= GP_INT1_FMASK;
+ if (val & BIT(GP_INT1)) {
+ val ^= BIT(GP_INT1);
gsi_isr_gp_int1(gsi);
}
@@ -1110,8 +1133,7 @@ static void gsi_isr_general(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
- if (val)
- dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
@@ -1128,6 +1150,7 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
u32 intr_mask;
u32 cnt = 0;
+ /* enum gsi_irq_type_id defines GSI interrupt types */
while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
@@ -1136,19 +1159,19 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
intr_mask ^= gsi_intr;
switch (gsi_intr) {
- case CH_CTRL_FMASK:
+ case BIT(GSI_CH_CTRL):
gsi_isr_chan_ctrl(gsi);
break;
- case EV_CTRL_FMASK:
+ case BIT(GSI_EV_CTRL):
gsi_isr_evt_ctrl(gsi);
break;
- case GLOB_EE_FMASK:
+ case BIT(GSI_GLOB_EE):
gsi_isr_glob_ee(gsi);
break;
- case IEOB_FMASK:
+ case BIT(GSI_IEOB):
gsi_isr_ieob(gsi);
break;
- case GENERAL_FMASK:
+ case BIT(GSI_GENERAL):
gsi_isr_general(gsi);
break;
default:
@@ -1168,6 +1191,34 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ return ret;
+ }
+ gsi->irq = irq;
+
+ return 0;
+}
+
+static void gsi_irq_exit(struct gsi *gsi)
+{
+ free_irq(gsi->irq, gsi);
+}
+
/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
struct gsi_event *event)
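Factoring the IRQ setup into gsi_irq_init()/gsi_irq_exit() pairs the request with its release; note the `ret ? : -EINVAL` handling, which maps a bogus zero return from platform_get_irq_byname() to an error. A userspace sketch of the same flow (lookup_irq is a stand-in, and the GNU `?:` shorthand is written out as a full ternary):

#include <errno.h>
#include <stdio.h>

static int lookup_irq(const char *name)	/* may return 0 or negative */
{
	return name[0] == 'g' ? 42 : 0;
}

static int irq_init(const char *name, int *irq_out)
{
	int ret = lookup_irq(name);

	if (ret <= 0)
		return ret ? ret : -EINVAL;	/* never report "IRQ 0" */
	*irq_out = ret;
	return 0;
}

static void irq_exit(int irq)
{
	(void)irq;	/* free_irq() in the real driver */
}

int main(void)
{
	int irq, ret = irq_init("gsi", &irq);

	printf("ret=%d irq=%d\n", ret, ret ? 0 : irq);
	if (!ret)
		irq_exit(irq);
	return 0;
}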
@@ -1452,8 +1503,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
}
/* Setup function for a single channel */
-static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
- bool legacy)
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
@@ -1472,7 +1522,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
if (ret)
goto err_evt_ring_de_alloc;
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, true);
if (channel->toward_ipa)
netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1511,8 +1561,19 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
+ bool success;
u32 val;
+ /* The error global interrupt type is always enabled (until we
+ * teardown), so we won't change that. A generic EE command
+ * completes with a GSI global interrupt of type GP_INT1. We
+ * only perform one generic command at a time (to allocate or
+ * halt a modem channel) and only from this function. So we
+ * enable the GP_INT1 IRQ type here while we're expecting it.
+ */
+ val = BIT(ERROR_INT) | BIT(GP_INT1);
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
/* First zero the result code field */
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
val &= ~GENERIC_EE_RESULT_FMASK;
@@ -1523,8 +1584,13 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+
+ /* Disable the GP_INT1 IRQ type again */
+ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+ if (success)
+ return 0;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
opcode, channel_id);
@@ -1540,16 +1606,11 @@ static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
- int ret;
-
- ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
- if (ret)
- dev_err(gsi->dev, "error %d halting modem channel %u\n",
- ret, channel_id);
+ (void)gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
}
/* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool legacy)
+static int gsi_channel_setup(struct gsi *gsi)
{
u32 channel_id = 0;
u32 mask;
@@ -1561,7 +1622,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
mutex_lock(&gsi->mutex);
do {
- ret = gsi_channel_setup_one(gsi, channel_id, legacy);
+ ret = gsi_channel_setup_one(gsi, channel_id);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
@@ -1647,10 +1708,11 @@ static void gsi_channel_teardown(struct gsi *gsi)
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool legacy)
+int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
+ int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1659,6 +1721,8 @@ int gsi_setup(struct gsi *gsi, bool legacy)
return -EIO;
}
+ gsi_irq_setup(gsi);
+
val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
@@ -1691,13 +1755,18 @@ int gsi_setup(struct gsi *gsi, bool legacy)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- return gsi_channel_setup(gsi, legacy);
+ ret = gsi_channel_setup(gsi);
+ if (ret)
+ gsi_irq_teardown(gsi);
+
+ return ret;
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
+ gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
@@ -1745,7 +1814,7 @@ static void gsi_evt_ring_init(struct gsi *gsi)
u32 evt_ring_id = 0;
gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->event_enable_bitmap = 0;
+ gsi->ieob_enabled_bitmap = 0;
do
init_completion(&gsi->evt_ring[evt_ring_id].completion);
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
@@ -1814,7 +1883,7 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data,
- bool command, bool prefetch)
+ bool command)
{
struct gsi_channel *channel;
u32 tre_count;
@@ -1838,7 +1907,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->use_prefetch = command && prefetch;
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -1892,13 +1960,16 @@ static void gsi_channel_exit_one(struct gsi_channel *channel)
}
/* Init function for channels */
-static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
- const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+static int gsi_channel_init(struct gsi *gsi, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
+ bool modem_alloc;
int ret = 0;
u32 i;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = gsi->version == IPA_VERSION_4_2;
+
gsi_evt_ring_init(gsi);
/* The endpoint data array is indexed by endpoint name */
@@ -1916,7 +1987,7 @@ static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
continue;
}
- ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ ret = gsi_channel_init_one(gsi, &data[i], command);
if (ret)
goto err_unwind;
}
@@ -1952,19 +2023,19 @@ static void gsi_channel_exit(struct gsi *gsi)
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
- unsigned int irq;
int ret;
gsi_validate_build();
gsi->dev = dev;
+ gsi->version = version;
/* The GSI layer performs NAPI on all endpoints. NAPI requires a
* network device structure, but the GSI layer does not have one,
@@ -1972,55 +2043,43 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
*/
init_dummy_netdev(&gsi->dummy_dev);
- ret = platform_get_irq_byname(pdev, "gsi");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
- return ret ? : -EINVAL;
- }
- irq = ret;
-
- ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
- if (ret) {
- dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
- return ret;
- }
- gsi->irq = irq;
-
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
- ret = -ENODEV;
- goto err_free_irq;
+ return -ENODEV;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
- ret = -EINVAL;
- goto err_free_irq;
+ return -EINVAL;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
- ret = -ENOMEM;
- goto err_free_irq;
+ return -ENOMEM;
}
- ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ init_completion(&gsi->completion);
+
+ ret = gsi_irq_init(gsi, pdev);
if (ret)
goto err_iounmap;
+ ret = gsi_channel_init(gsi, count, data);
+ if (ret)
+ goto err_irq_exit;
+
mutex_init(&gsi->mutex);
- init_completion(&gsi->completion);
return 0;
+err_irq_exit:
+ gsi_irq_exit(gsi);
err_iounmap:
iounmap(gsi->virt);
-err_free_irq:
- free_irq(gsi->irq, gsi);
return ret;
}
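The reordered gsi_init() error handling is the usual goto-unwind style: resources are released in reverse order of acquisition, and early failures return directly because nothing is held yet. A compact sketch of the shape:

#include <stdio.h>

static int step_a(void)  { return 0; }
static int step_b(void)  { return -1; }	/* force a failure */
static void undo_a(void) { puts("undo a"); }

static int init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto err_undo_a;	/* unwind in reverse order */

	return 0;

err_undo_a:
	undo_a();
	return ret;
}

int main(void)
{
	printf("init: %d\n", init());
	return 0;
}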
@@ -2030,7 +2089,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- free_irq(gsi->irq, gsi);
+ gsi_irq_exit(gsi);
iounmap(gsi->virt);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 3f9f29d531c4..ecc784e3a812 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -13,6 +13,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include "ipa_version.h"
+
/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX 17
#define GSI_EVT_RING_COUNT_MAX 13
@@ -31,10 +33,10 @@ struct ipa_gsi_endpoint_data;
/* Execution environment IDs */
enum gsi_ee_id {
- GSI_EE_AP = 0,
- GSI_EE_MODEM = 1,
- GSI_EE_UC = 2,
- GSI_EE_TZ = 3,
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
struct gsi_ring {
@@ -94,12 +96,12 @@ struct gsi_trans_info {
/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
- GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
- GSI_CHANNEL_STATE_ALLOCATED = 0x1,
- GSI_CHANNEL_STATE_STARTED = 0x2,
- GSI_CHANNEL_STATE_STOPPED = 0x3,
- GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
- GSI_CHANNEL_STATE_ERROR = 0xf,
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_ERROR = 0xf,
};
/* We only care about channels between IPA and AP */
@@ -107,7 +109,6 @@ struct gsi_channel {
struct gsi *gsi;
bool toward_ipa;
bool command; /* AP command TX channel or not */
- bool use_prefetch; /* use prefetch (else escape buf) */
u8 tlv_count; /* # entries in TLV FIFO */
u16 tre_count;
@@ -147,6 +148,7 @@ struct gsi_evt_ring {
struct gsi {
struct device *dev; /* Same as IPA device */
+ enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
@@ -154,9 +156,10 @@ struct gsi {
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
- u32 event_bitmap;
- u32 event_enable_bitmap;
- u32 modem_channel_bitmap;
+ u32 event_bitmap; /* allocated event rings */
+ u32 modem_channel_bitmap; /* modem channels to allocate */
+ u32 type_enabled_bitmap; /* GSI IRQ types enabled */
+ u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
struct completion completion; /* for global EE commands */
struct mutex mutex; /* protects commands, programming */
};
@@ -164,14 +167,13 @@ struct gsi {
/**
* gsi_setup() - Set up the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
- * @legacy: Set up for legacy hardware
*
* Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
*/
-int gsi_setup(struct gsi *gsi, bool legacy);
+int gsi_setup(struct gsi *gsi);
/**
* gsi_teardown() - Tear down GSI subsystem
@@ -219,15 +221,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
- * @legacy: Legacy behavior
+ * @doorbell: Whether to (possibly) enable the doorbell engine
*
- * Reset a channel and reconfigure it. The @legacy flag indicates
- * that some steps should be done differently for legacy hardware.
+ * Reset a channel and reconfigure it. The @doorbell flag indicates
+ * that the doorbell engine should be enabled if needed.
*
* GSI hardware relinquishes ownership of all pending receive buffer
* transactions and they will complete with their cancelled flag set.
*/
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
@@ -236,15 +238,18 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
* gsi_init() - Initialize the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
* @pdev: IPA platform device
+ * @version: IPA hardware version (implies GSI version)
+ * @count: Number of entries in the configuration data array
+ * @data: Endpoint and channel configuration data
*
* Return: 0 if successful, or a negative error code
*
* Early stage initialization of the GSI subsystem, performing tasks
* that can be done before the GSI hardware is ready to use.
*/
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc);
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
/**
* gsi_exit() - Exit the GSI subsystem
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 8e0e9350c383..c1799d1e8a83 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -66,12 +66,20 @@
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
#define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is present for IPA v4.5 and above */
#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
#define ERINDEX_FMASK GENMASK(18, 14)
#define CHSTATE_FMASK GENMASK(23, 20)
#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
+enum gsi_channel_type {
+ GSI_CHANNEL_TYPE_MHI = 0x0,
+ GSI_CHANNEL_TYPE_XHCI = 0x1,
+ GSI_CHANNEL_TYPE_GPI = 0x2,
+ GSI_CHANNEL_TYPE_XDCI = 0x3,
+};
+
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
@@ -95,7 +103,7 @@
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
@@ -128,6 +136,7 @@
#define EV_INTYPE_FMASK GENMASK(16, 16)
#define EV_CHSTATE_FMASK GENMASK(23, 20)
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
+/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
@@ -216,6 +225,15 @@
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
+/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+};
+
#define GSI_EV_CH_CMD_OFFSET \
GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
@@ -223,6 +241,13 @@
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
+/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
#define GSI_GENERIC_CMD_OFFSET \
GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
@@ -231,29 +256,40 @@
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
+/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+};
+
#define GSI_GSI_HW_PARAM_2_OFFSET \
GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
(0x0001f040 + 0x4000 * (ee))
#define IRAM_SIZE_FMASK GENMASK(2, 0)
-#define IRAM_SIZE_ONE_KB_FVAL 0
-#define IRAM_SIZE_TWO_KB_FVAL 1
-/* The next two values are available for GSI v2.0 and above */
-#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
-#define IRAM_SIZE_THREE_KB_FVAL 3
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
-/* Fields below are present for GSI v2.0 and above */
+/* Fields below are present for IPA v4.0 and above */
#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
-/* Fields below are present for GSI v2.2 and above */
+/* Fields below are present for IPA v4.2 and above */
#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */
+enum gsi_iram_size {
+ IRAM_SIZE_ONE_KB = 0x0,
+ IRAM_SIZE_TWO_KB = 0x1,
+/* The next two values are available for IPA v4.0 and above */
+ IRAM_SIZE_TWO_N_HALF_KB = 0x2,
+ IRAM_SIZE_THREE_KB = 0x3,
+};
+
+/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
@@ -262,15 +298,17 @@
GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
(0x0001f088 + 0x4000 * (ee))
-/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */
-#define CH_CTRL_FMASK GENMASK(0, 0)
-#define EV_CTRL_FMASK GENMASK(1, 1)
-#define GLOB_EE_FMASK GENMASK(2, 2)
-#define IEOB_FMASK GENMASK(3, 3)
-#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
-#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
-#define GENERAL_FMASK GENMASK(6, 6)
-#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+
+/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+enum gsi_irq_type_id {
+ GSI_CH_CTRL = 0x0, /* channel allocation, etc. */
+ GSI_EV_CTRL = 0x1, /* event ring allocation, etc. */
+ GSI_GLOB_EE = 0x2, /* global/general event */
+ GSI_IEOB = 0x3, /* TRE completion */
+ GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */
+ GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */
+ GSI_GENERAL = 0x6, /* general-purpose event */
+};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
@@ -329,12 +367,13 @@
GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
(0x0001f110 + 0x4000 * (ee))
-/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
-#define ERROR_INT_FMASK GENMASK(0, 0)
-#define GP_INT1_FMASK GENMASK(1, 1)
-#define GP_INT2_FMASK GENMASK(2, 2)
-#define GP_INT3_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
+/* Values here are bit positions in the GLOB_IRQ_* registers */
+enum gsi_global_irq_id {
+ ERROR_INT = 0x0,
+ GP_INT1 = 0x1,
+ GP_INT2 = 0x2,
+ GP_INT3 = 0x3,
+};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
@@ -348,12 +387,13 @@
GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
(0x0001f128 + 0x4000 * (ee))
-/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
-#define BREAK_POINT_FMASK GENMASK(0, 0)
-#define BUS_ERROR_FMASK GENMASK(1, 1)
-#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
+/* Values here are bit positions in the (general) GSI_IRQ_* registers */
+enum gsi_general_id {
+ BREAK_POINT = 0x0,
+ BUS_ERROR = 0x1,
+ CMD_FIFO_OVRFLOW = 0x2,
+ MCS_STACK_OVRFLOW = 0x3,
+};
#define GSI_CNTXT_INTSET_OFFSET \
GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
@@ -373,6 +413,25 @@
#define ERR_TYPE_FMASK GENMASK(27, 24)
#define ERR_EE_FMASK GENMASK(31, 28)
+/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */
+enum gsi_err_code {
+ GSI_INVALID_TRE = 0x1,
+ GSI_OUT_OF_BUFFERS = 0x2,
+ GSI_OUT_OF_RESOURCES = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP = 0x4,
+ GSI_EVT_RING_EMPTY = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS = 0x6,
+ /* 7 is not assigned */
+ GSI_HWO_1 = 0x8,
+};
+
+/** enum gsi_err_type - ERR_TYPE field values in EE_ERR_LOG */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
#define GSI_ERROR_LOG_CLR_OFFSET \
GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
@@ -384,10 +443,18 @@
(0x0001f400 + 0x4000 * (ee))
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
-#define GENERIC_EE_SUCCESS_FVAL 1
-#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3
-#define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5
-#define GENERIC_EE_NO_RESOURCES_FVAL 7
+
+/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
+enum gsi_generic_ee_result {
+ GENERIC_EE_SUCCESS = 0x1,
+ GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
+ GENERIC_EE_INCORRECT_DIRECTION = 0x3,
+ GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GENERIC_EE_INCORRECT_CHANNEL = 0x5,
+ GENERIC_EE_RETRY = 0x6,
+ GENERIC_EE_NO_RESOURCES = 0x7,
+};
+
#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
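These definitions follow a consistent idiom: a *_FMASK built with GENMASK() says where a field's bits live, and an enum names either the values the field can hold (opcodes, error codes) or, for the IRQ-type enums, single bit positions. A minimal sketch of both uses, assuming the CH_CMD definitions from earlier in this header and the gsi->virt register base from gsi.h; the function itself is illustrative only, not part of the patch:

	#include <linux/bitfield.h>
	#include <linux/io.h>

	static void gsi_reg_idiom_sketch(struct gsi *gsi, u32 channel_id)
	{
		u32 val;

		/* Field-value enum: channel ID and opcode each fill a field */
		val = u32_encode_bits(channel_id, CH_CHID_FMASK);
		val |= u32_encode_bits(GSI_CH_START, CH_OPCODE_FMASK);
		iowrite32(val, gsi->virt + GSI_CH_CMD_OFFSET);

		/* Bit-position enum: IRQ types name bits, not field values */
		val = BIT(GSI_CH_CTRL) | BIT(GSI_EV_CTRL);
		iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
	}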
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d92dd3f09b73..002e51448510 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -38,9 +38,9 @@
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
- pipeline_clear_hps = 0,
- pipeline_clear_src_grp = 1,
- pipeline_clear_full = 2,
+ pipeline_clear_hps = 0x0,
+ pipeline_clear_src_grp = 0x1,
+ pipeline_clear_full = 0x2,
};
/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index f7e6f87facf7..4ed09c486abc 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -27,16 +27,16 @@ struct gsi_channel;
* a request is *not* an immediate command.
*/
enum ipa_cmd_opcode {
- IPA_CMD_NONE = 0,
- IPA_CMD_IP_V4_FILTER_INIT = 3,
- IPA_CMD_IP_V6_FILTER_INIT = 4,
- IPA_CMD_IP_V4_ROUTING_INIT = 7,
- IPA_CMD_IP_V6_ROUTING_INIT = 8,
- IPA_CMD_HDR_INIT_LOCAL = 9,
- IPA_CMD_REGISTER_WRITE = 12,
- IPA_CMD_IP_PACKET_INIT = 16,
- IPA_CMD_DMA_SHARED_MEM = 19,
- IPA_CMD_IP_PACKET_TAG_STATUS = 20,
+ IPA_CMD_NONE = 0x0,
+ IPA_CMD_IP_V4_FILTER_INIT = 0x3,
+ IPA_CMD_IP_V6_FILTER_INIT = 0x4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 0x7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 0x8,
+ IPA_CMD_HDR_INIT_LOCAL = 0x9,
+ IPA_CMD_REGISTER_WRITE = 0xc,
+ IPA_CMD_IP_PACKET_INIT = 0x10,
+ IPA_CMD_DMA_SHARED_MEM = 0x13,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 0x14,
};
/**
@@ -50,7 +50,6 @@ struct ipa_cmd_info {
enum dma_data_direction direction;
};
-
#ifdef IPA_VALIDATE
/**
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index d4c2bc7ad24b..37dada4da680 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -24,6 +24,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_DMA_ONLY,
.config = {
+ .resource_group = 0,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
},
@@ -42,6 +43,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 0,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -65,6 +67,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.seq_type =
IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
.config = {
+ .resource_group = 0,
.checksum = true,
.qmap = true,
.status_enable = true,
@@ -88,6 +91,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 0,
.checksum = true,
.qmap = true,
.aggregation = true,
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index de2768d71ab5..bd92b619e7fe 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -26,6 +26,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_DMA_ONLY,
.config = {
+ .resource_group = 1,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
},
@@ -44,6 +45,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 1,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -67,6 +69,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.seq_type =
IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
.config = {
+ .resource_group = 1,
.checksum = true,
.qmap = true,
.status_enable = true,
@@ -90,6 +93,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 1,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -146,11 +150,11 @@ static const struct ipa_resource_src ipa_resource_src[] = {
.type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
.limits[0] = {
.min = 1,
- .max = 63,
+ .max = 255,
},
.limits[1] = {
.min = 1,
- .max = 63,
+ .max = 255,
},
},
{
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7fc1058a5ca9..83c4b78373ef 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -45,10 +45,10 @@
* the IPA endpoint.
*/
-/* The maximum value returned by ipa_resource_group_count() */
-#define IPA_RESOURCE_GROUP_COUNT 4
+/* The maximum value returned by ipa_resource_group_{src,dst}_count() */
+#define IPA_RESOURCE_GROUP_SRC_MAX 5
+#define IPA_RESOURCE_GROUP_DST_MAX 5
-/** enum ipa_resource_type_src - source resource types */
/**
* struct gsi_channel_data - GSI channel configuration data
* @tre_count: number of TREs in the channel ring
@@ -109,6 +109,7 @@ struct ipa_endpoint_rx_data {
/**
* struct ipa_endpoint_config_data - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
* @checksum: whether checksum offload is enabled
* @qmap: whether endpoint uses QMAP protocol
* @aggregation: whether endpoint supports aggregation
@@ -119,6 +120,7 @@ struct ipa_endpoint_rx_data {
* @rx: RX-specific endpoint information (see above)
*/
struct ipa_endpoint_config_data {
+ u32 resource_group;
bool checksum;
bool qmap;
bool aggregation;
@@ -206,7 +208,7 @@ struct ipa_resource_limits {
*/
struct ipa_resource_src {
enum ipa_resource_type_src type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_SRC_MAX];
};
/**
@@ -216,7 +218,7 @@ struct ipa_resource_src {
*/
struct ipa_resource_dst {
enum ipa_resource_type_dst type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_DST_MAX];
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index b40b711cf4bd..970730045751 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -665,8 +665,8 @@ static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
/* ...but we still need to fit into a 32-bit register */
WARN_ON(ticks > U32_MAX);
- /* IPA v3.5.1 just records the tick count */
- if (ipa->version == IPA_VERSION_3_5_1)
+ /* IPA v3.5.1 through v4.1 just record the tick count */
+ if (ipa->version < IPA_VERSION_4_2)
return (u32)ticks;
/* For IPA v4.2, the tick count is represented by base and
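The base/scale encoding packs an approximate tick count into two small fields, with the stored count roughly equal to base << scale and base limited to the 5-bit BASE_VALUE field. A rough worked example under that assumption (the real code also rounds before shifting):

	ticks  = 1000                /* requested */
	scale  = fls(1000) - 5       /* = 10 - 5 = 5 */
	base   = 1000 >> 5           /* = 31 */
	stored = 31 << 5             /* = 992 ticks, within 1% of 1000 */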
@@ -751,6 +751,16 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 val;
+
+ val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
@@ -1207,7 +1217,6 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
- bool legacy;
u32 retries;
u32 len = 1;
void *virt;
@@ -1269,8 +1278,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- gsi_channel_reset(gsi, endpoint->channel_id, legacy);
+ gsi_channel_reset(gsi, endpoint->channel_id, true);
msleep(1);
@@ -1293,21 +1301,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
bool special;
- bool legacy;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
* is active, we need to handle things specially to recover.
* All other cases just need to reset the underlying GSI channel.
- *
- * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ special = ipa->version == IPA_VERSION_3_5_1 &&
+ !endpoint->toward_ipa &&
+ endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
- gsi_channel_reset(&ipa->gsi, channel_id, legacy);
+ gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
dev_err(&ipa->pdev->dev,
@@ -1328,6 +1334,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
ipa_endpoint_status(endpoint);
}
@@ -1538,8 +1545,8 @@ int ipa_endpoint_config(struct ipa *ipa)
val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
+ rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
+ max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1548,7 +1555,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
+ max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 58a245de488e..881ecc27bd6e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -25,7 +25,7 @@ struct ipa_gsi_endpoint_data;
#define IPA_MTU ETH_DATA_LEN
enum ipa_endpoint_name {
- IPA_ENDPOINT_AP_MODEM_TX = 0,
+ IPA_ENDPOINT_AP_MODEM_TX,
IPA_ENDPOINT_MODEM_LAN_TX,
IPA_ENDPOINT_MODEM_COMMAND_TX,
IPA_ENDPOINT_AP_COMMAND_TX,
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index cc1ea28f7bc2..61dd7605bcb6 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -139,12 +139,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 val;
/* assert(mask & ipa->available); */
- val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
if (enable)
val |= mask;
else
val &= ~mask;
- iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
}
/* Enable TX_SUSPEND for an endpoint */
@@ -168,7 +168,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
u32 val;
val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
- iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_CLR_OFFSET);
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 727e9c5044d1..b5d63a0cd19e 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -13,22 +13,6 @@ struct ipa;
struct ipa_interrupt;
/**
- * enum ipa_irq_id - IPA interrupt type
- * @IPA_IRQ_UC_0: Microcontroller event interrupt
- * @IPA_IRQ_UC_1: Microcontroller response interrupt
- * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
- *
- * The data ready interrupt is signaled if data has arrived that is destined
- * for an AP RX endpoint whose underlying GSI channel is suspended/stopped.
- */
-enum ipa_irq_id {
- IPA_IRQ_UC_0 = 2,
- IPA_IRQ_UC_1 = 3,
- IPA_IRQ_TX_SUSPEND = 14,
- IPA_IRQ_COUNT, /* Number of interrupt types (not an index) */
-};
-
-/**
* typedef ipa_irq_handler_t - IPA interrupt handler function type
* @ipa: IPA pointer
* @irq_id: interrupt type
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index cd4d993b0bbb..3fb9c5d90b70 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -111,8 +111,7 @@ int ipa_setup(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
- /* Setup for IPA v3.5.1 has some slight differences */
- ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ ret = gsi_setup(&ipa->gsi);
if (ret)
return ret;
@@ -326,7 +325,7 @@ static void ipa_hardware_config(struct ipa *ipa)
/* Disable PA mask to allow HOLB drop (hardware workaround) */
val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN;
+ val &= ~PA_MASK_EN_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
}
@@ -336,14 +335,16 @@ static void ipa_hardware_config(struct ipa *ipa)
ipa_hardware_config_qsb(ipa);
/* Configure aggregation granularity */
- val = ioread32(ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY);
+ val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
- if (ipa->version == IPA_VERSION_4_2)
- iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ if (ipa->version == IPA_VERSION_4_2) {
+ u32 offset = ipa_reg_filt_rout_hash_en_offset(ipa->version);
+
+ iowrite32(0, ipa->reg_virt + offset);
+ }
/* Enable dynamic clock division */
ipa_hardware_dcd_config(ipa);
@@ -363,52 +364,41 @@ static void ipa_hardware_deconfig(struct ipa *ipa)
#ifdef IPA_VALIDATION
-/* # IPA resources used based on version (see IPA_RESOURCE_GROUP_COUNT) */
-static int ipa_resource_group_count(struct ipa *ipa)
-{
- switch (ipa->version) {
- case IPA_VERSION_3_5_1:
- return 3;
-
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- return 4;
-
- case IPA_VERSION_4_2:
- return 1;
-
- default:
- return 0;
- }
-}
-
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
- u32 group_count = ipa_resource_group_count(ipa);
+ u32 group_count;
u32 i;
u32 j;
- if (!group_count)
+ /* We program at most 6 source or destination resource group limits */
+ BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6);
+
+ group_count = ipa_resource_group_src_count(ipa->version);
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX)
return false;
- /* Return an error if a non-zero resource group limit is specified
- * for a resource not supported by hardware.
+ /* Return an error if a non-zero resource limit is specified
+ * for a resource group not supported by hardware.
*/
for (i = 0; i < data->resource_src_count; i++) {
const struct ipa_resource_src *resource;
resource = &data->resource_src[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
+ group_count = ipa_resource_group_dst_count(ipa->version);
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX)
+ return false;
+
for (i = 0; i < data->resource_dst_count; i++) {
const struct ipa_resource_dst *resource;
resource = &data->resource_dst[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
@@ -435,46 +425,64 @@ ipa_resource_config_common(struct ipa *ipa, u32 offset,
val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ if (ylimits) {
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ }
iowrite32(val, ipa->reg_virt + offset);
}
-static void ipa_resource_config_src_01(struct ipa *ipa,
- const struct ipa_resource_src *resource)
+static void ipa_resource_config_src(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
{
- u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ u32 group_count = ipa_resource_group_src_count(ipa->version);
+ const struct ipa_resource_limits *ylimits;
+ u32 offset;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[0], &resource->limits[1]);
-}
+ offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-static void ipa_resource_config_src_23(struct ipa *ipa,
- const struct ipa_resource_src *resource)
-{
- u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ if (group_count < 2)
+ return;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[2], &resource->limits[3]);
-}
+ offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-static void ipa_resource_config_dst_01(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
-{
- u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ if (group_count < 4)
+ return;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[0], &resource->limits[1]);
+ offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
}
-static void ipa_resource_config_dst_23(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
+static void ipa_resource_config_dst(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
{
- u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ u32 group_count = ipa_resource_group_dst_count(ipa->version);
+ const struct ipa_resource_limits *ylimits;
+ u32 offset;
+
+ offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
+
+ if (group_count < 2)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
- ipa_resource_config_common(ipa, offset,
- &resource->limits[2], &resource->limits[3]);
+ if (group_count < 4)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
}
static int
@@ -485,15 +493,11 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
if (!ipa_resource_limits_valid(ipa, data))
return -EINVAL;
- for (i = 0; i < data->resource_src_count; i++) {
- ipa_resource_config_src_01(ipa, &data->resource_src[i]);
- ipa_resource_config_src_23(ipa, &data->resource_src[i]);
- }
+ for (i = 0; i < data->resource_src_count; i++)
+ ipa_resource_config_src(ipa, &data->resource_src[i]);
- for (i = 0; i < data->resource_dst_count; i++) {
- ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
- ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
- }
+ for (i = 0; i < data->resource_dst_count; i++)
+ ipa_resource_config_dst(ipa, &data->resource_dst[i]);
return 0;
}
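The rewritten ipa_resource_config_src() and ipa_resource_config_dst() walk the limit registers in X/Y pairs and pass a null Y limit when the hardware's group count is odd. An illustration only, for the three destination groups of IPA v3.5.1:

	/* group_count == 3 */
	DST_RSRC_GRP_01: X = limits[0], Y = limits[1]
	DST_RSRC_GRP_23: X = limits[2], Y not written (ylimits is NULL)
	DST_RSRC_GRP_45: not written at all (group_count < 4)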
@@ -678,16 +682,13 @@ static void ipa_validate_build(void)
*/
BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
- /* Exceeding 128 bytes makes the transaction pool *much* larger */
- BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);
-
/* This is used as a divisor */
BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY));
+ field_max(AGGR_GRANULARITY_FMASK));
#endif /* IPA_VALIDATE */
}
@@ -720,10 +721,8 @@ static int ipa_probe(struct platform_device *pdev)
const struct ipa_data *data;
struct ipa_clock *clock;
struct rproc *rproc;
- bool modem_alloc;
bool modem_init;
struct ipa *ipa;
- bool prefetch;
phandle ph;
int ret;
@@ -785,17 +784,12 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_reg_exit;
- /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
- prefetch = ipa->version != IPA_VERSION_3_5_1;
- /* IPA v4.2 requires the AP to allocate channels for the modem */
- modem_alloc = ipa->version == IPA_VERSION_4_2;
-
- ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
- data->endpoint_data, modem_alloc);
+ ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
+ data->endpoint_data);
if (ret)
goto err_mem_exit;
- /* Result is a non-zero mask endpoints that support filtering */
+ /* Result is a non-zero mask of endpoints that support filtering */
ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
data->endpoint_data);
if (!ipa->filter_map) {
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 2d45c444a67f..0cc3a3374caa 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -89,7 +89,7 @@ int ipa_mem_setup(struct ipa *ipa)
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- iowrite32(ipa->mem_offset + offset,
+ iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset,
ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
return 0;
@@ -160,13 +160,13 @@ int ipa_mem_config(struct ipa *ipa)
mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
/* If the sizes don't match, issue a warning */
- if (ipa->mem_offset + mem_size > ipa->mem_size) {
- dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
- mem_size);
- } else if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ if (ipa->mem_offset + mem_size < ipa->mem_size) {
dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
mem_size);
ipa->mem_size = mem_size;
+ } else if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
}
/* Prealloc DMA memory for zeroing regions */
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index cfac456cea0c..12b6621f4b0e 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -74,12 +74,12 @@ struct ipa_init_complete_ind {
/* The AP tells the modem its platform type. We assume Android. */
enum ipa_platform_type {
- IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */
- IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */
- IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */
- IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */
- IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */
- IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0x0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 0x1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 0x2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 0x3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 0x4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
/* This defines the start and end offset of a range of memory. Both
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e542598fd775..d02e7ecc6fc0 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -65,14 +65,14 @@ struct ipa;
* of valid bits for the register.
*/
-#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038
-
+/* The next field is not supported for IPA v4.1 */
#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
#define ENABLE_FMASK GENMASK(0, 0)
#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
+/* The remaining fields are not present for IPA v3.5.1 */
#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
@@ -110,6 +110,7 @@ struct ipa;
#define TX_0_FMASK GENMASK(19, 19)
#define TX_1_FMASK GENMASK(20, 20)
#define FNR_FMASK GENMASK(21, 21)
+/* The remaining fields are not present for IPA v3.5.1 */
#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
@@ -138,25 +139,17 @@ struct ipa;
#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0 and above */
+/* The next two fields are not present for IPA v3.5.1 */
#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
{
if (version == IPA_VERSION_3_5_1)
- return 0x0000010c;
+ return 0x000008c;
- return 0x000000b4;
+ return 0x0000148;
}
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-
-/* The next register is present for IPA v4.2 and above */
-#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148
-#define IPV6_ROUTER_HASH_EN GENMASK(0, 0)
-#define IPV6_FILTER_HASH_EN GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_EN GENMASK(8, 8)
-#define IPV4_FILTER_HASH_EN GENMASK(12, 12)
static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
{
@@ -166,45 +159,70 @@ static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
return 0x000014c;
}
-#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12)
+/* The next four fields are used for the hash enable and flush registers */
+#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
+#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
+#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+
+/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
+static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000010c;
+
+ return 0x000000b4;
+}
#define IPA_REG_BCR_OFFSET 0x000001d0
-#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0)
-#define BCR_TX_NOT_USING_BRESP BIT(1)
-#define BCR_SUSPEND_L2_IRQ BIT(3)
-#define BCR_HOLB_DROP_L2_IRQ BIT(4)
-#define BCR_DUAL_TX BIT(5)
+/* The next two fields are not present for IPA v4.2 */
+#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
+#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
+/* The next field is invalid for IPA v4.1 */
+#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
+/* The next two fields are not present for IPA v4.2 */
+#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
+#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
+#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
+#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
+#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
+#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
+#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
/* Backward compatibility register value to use for each version */
static inline u32 ipa_reg_bcr_val(enum ipa_version version)
{
if (version == IPA_VERSION_3_5_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP |
- BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+ return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_TX_NOT_USING_BRESP_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK;
if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ |
- BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+ return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK;
return 0x00000000;
}
+/* The value of the next register must be a multiple of 8 */
#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
+
+#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
/* The internal inactivity timer clock is used for the aggregation timer */
-#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
+#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-#define AGGR_GRANULARITY GENMASK(8, 4)
/* Compute the value to use in the AGGR_GRANULARITY field representing the
* given number of microseconds. The value is one less than the number of
- * timer ticks in the requested period. Zero not a valid granularity value.
+ * timer ticks in the requested period. 0 is not a valid granularity value.
*/
static inline u32 ipa_aggr_granularity_val(u32 usec)
{
@@ -213,25 +231,25 @@ static inline u32 ipa_aggr_granularity_val(u32 usec)
#define IPA_REG_TX_CFG_OFFSET 0x000001fc
/* The first three fields are present for IPA v3.5.1 only */
-#define TX0_PREFETCH_DISABLE GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2)
-/* The next fields are present for IPA v4.0 and above */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11)
-#define PA_MASK_EN GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13)
-/* The last two fields are present for IPA v4.2 and above */
-#define SSPND_PA_NO_START_STATE GENMASK(18, 18)
-#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19)
+#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
+#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
+#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
+/* The next six fields are present for IPA v4.0 and above */
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
+#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
+#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
+#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
+#define PA_MASK_EN_FMASK GENMASK(12, 12)
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
+/* The next two fields are present for IPA v4.2 only */
+#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
+#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define BAM_MAX_PIPES_FMASK GENMASK(4, 0)
-#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24)
+#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
+#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
+#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
+#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
{
@@ -244,6 +262,43 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
+/* # IPA source resource groups available based on version */
+static inline u32 ipa_resource_group_src_count(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* # IPA destination resource groups available based on version */
+static inline u32 ipa_resource_group_dst_count(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ return 3;
+
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Not all of the following are valid (depends on the count, above) */
#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000400 + 0x0020 * (rt))
#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
@@ -256,13 +311,16 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
(0x00000504 + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000508 + 0x0020 * (rt))
+/* The next four fields are used for all resource group registers */
#define X_MIN_LIM_FMASK GENMASK(5, 0)
#define X_MAX_LIM_FMASK GENMASK(13, 8)
+/* The next two fields are not always present (if the group count is odd) */
#define Y_MIN_LIM_FMASK GENMASK(21, 16)
#define Y_MAX_LIM_FMASK GENMASK(29, 24)
#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
(0x00000800 + 0x0070 * (ep))
+/* The next field should be used only for IPA v3.5.1 */
#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
#define ENDP_DELAY_FMASK GENMASK(1, 1)
@@ -273,6 +331,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL = 0x1,
+ IPA_CS_OFFLOAD_DL = 0x2,
+};
+
#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
(0x00000810 + 0x0070 * (ep))
#define HDR_LEN_FMASK GENMASK(5, 0)
@@ -308,6 +373,14 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define PAD_EN_FMASK GENMASK(29, 29)
#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+enum ipa_mode {
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
+};
+
#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
(0x00000824 + 0x0070 * (ep))
#define AGGR_EN_FMASK GENMASK(1, 0)
@@ -319,6 +392,24 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
+/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR = 0x0,
+ IPA_ENABLE_AGGR = 0x1,
+ IPA_ENABLE_DEAGGR = 0x2,
+};
+
+/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
+
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
(0x0000082c + 0x0070 * (rxep))
@@ -327,7 +418,7 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
(0x00000830 + 0x0070 * (rxep))
-/* The next fields are present for IPA v4.2 only */
+/* The next two fields are present for IPA v4.2 only */
#define BASE_VALUE_FMASK GENMASK(4, 0)
#define SCALE_FMASK GENMASK(12, 8)
@@ -335,13 +426,24 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
(0x00000834 + 0x0070 * (txep))
#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
+#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
+#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
(0x00000838 + 0x0070 * (ep))
-#define RSRC_GRP_FMASK GENMASK(1, 0)
+/* Encoded value for RSRC_GRP endpoint register RSRC_GRP field */
+static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
+{
+ switch (version) {
+ case IPA_VERSION_4_2:
+ return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+ default:
+ return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
+ }
+}
+
/* Valid only for TX (IPA consumer) endpoints */
#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
@@ -351,15 +453,34 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
+ * @IPA_SEQ_DMA_ONLY: only DMA is performed
+ * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
+ * second packet processing pass + no decipher + microcontroller
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
+ * packet processing + no decipher + no uCP + HPS REP DMA parser
+ * @IPA_SEQ_INVALID: invalid sequencer type
+ *
+ * The values defined here are broken into 4-bit nibbles that are written
+ * into fields of the INIT_SEQ_N endpoint registers.
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA_ONLY = 0x0000,
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
+ IPA_SEQ_INVALID = 0xffff,
+};
+
#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
(0x00000840 + 0x0070 * (ep))
#define STATUS_EN_FMASK GENMASK(0, 0)
#define STATUS_ENDP_FMASK GENMASK(5, 1)
#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0 and above */
+/* The next field is not present for IPA v3.5.1 */
#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */
+/* The next register is only present for IPA versions that support hashing */
#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
(0x0000085c + 0x0070 * (er))
#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
@@ -394,89 +515,67 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
(0x00003010 + 0x1000 * (ee))
+/**
+ * enum ipa_irq_id - Bit positions representing type of IPA IRQ
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ *
+ * IRQ types not described above are not currently used.
+ */
+enum ipa_irq_id {
+ IPA_IRQ_BAD_SNOC_ACCESS = 0x0,
+ /* Type (bit) 0x1 is not defined */
+ IPA_IRQ_UC_0 = 0x2,
+ IPA_IRQ_UC_1 = 0x3,
+ IPA_IRQ_UC_2 = 0x4,
+ IPA_IRQ_UC_3 = 0x5,
+ IPA_IRQ_UC_IN_Q_NOT_EMPTY = 0x6,
+ IPA_IRQ_UC_RX_CMD_Q_NOT_FULL = 0x7,
+ IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY = 0x8,
+ IPA_IRQ_RX_ERR = 0x9,
+ IPA_IRQ_DEAGGR_ERR = 0xa,
+ IPA_IRQ_TX_ERR = 0xb,
+ IPA_IRQ_STEP_MODE = 0xc,
+ IPA_IRQ_PROC_ERR = 0xd,
+ IPA_IRQ_TX_SUSPEND = 0xe,
+ IPA_IRQ_TX_HOLB_DROP = 0xf,
+ IPA_IRQ_BAM_GSI_IDLE = 0x10,
+ IPA_IRQ_PIPE_YELLOW_BELOW = 0x11,
+ IPA_IRQ_PIPE_RED_BELOW = 0x12,
+ IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13,
+ IPA_IRQ_PIPE_RED_ABOVE = 0x14,
+ IPA_IRQ_UCP = 0x15,
+ IPA_IRQ_DCMP = 0x16,
+ IPA_IRQ_GSI_EE = 0x17,
+ IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18,
+ IPA_IRQ_GSI_UC = 0x19,
+ IPA_IRQ_COUNT, /* Last; not an id */
+};
#define IPA_REG_IRQ_UC_OFFSET \
IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
(0x0000301c + 0x1000 * (ee))
+#define UC_INTR_FMASK GENMASK(0, 0)
+/* ipa->available defines the valid bits in the SUSPEND_INFO register */
#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
(0x00003030 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \
- IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \
+/* ipa->available defines the valid bits in the IRQ_SUSPEND_EN register */
+#define IPA_REG_IRQ_SUSPEND_EN_OFFSET \
+ IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(ee) \
(0x00003034 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */
-#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \
- IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \
+/* ipa->available defines the valid bits in the IRQ_SUSPEND_CLR register */
+#define IPA_REG_IRQ_SUSPEND_CLR_OFFSET \
+ IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(ee) \
(0x00003038 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */
-
-/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
-enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0,
- IPA_CS_OFFLOAD_UL = 1,
- IPA_CS_OFFLOAD_DL = 2,
- IPA_CS_RSVD
-};
-
-/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
-enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0,
- IPA_ENABLE_AGGR = 1,
- IPA_ENABLE_DEAGGR = 2,
-};
-
-/** enum ipa_aggr_type - aggregation type field in in_ENDP_INIT_AGGR_N */
-enum ipa_aggr_type {
- IPA_MBIM_16 = 0,
- IPA_HDLC = 1,
- IPA_TLP = 2,
- IPA_RNDIS = 3,
- IPA_GENERIC = 4,
- IPA_COALESCE = 5,
- IPA_QCMAP = 6,
-};
-
-/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
-enum ipa_mode {
- IPA_BASIC = 0,
- IPA_ENABLE_FRAMING_HDLC = 1,
- IPA_ENABLE_DEFRAMING_HDLC = 2,
- IPA_DMA = 3,
-};
-
-/**
- * enum ipa_seq_type - HPS and DPS sequencer type fields in in ENDP_INIT_SEQ_N
- * @IPA_SEQ_DMA_ONLY: only DMA is performed
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP:
- * packet processing + no decipher + microcontroller (Ethernet Bridging)
- * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
- * second packet processing pass + no decipher + microcontroller
- * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher
- * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
- * packet processing + no decipher + no uCP + HPS REP DMA parser
- * @IPA_SEQ_INVALID: invalid sequencer type
- *
- * The values defined here are broken into 4-bit nibbles that are written
- * into fields of the INIT_SEQ_N endpoint registers.
- */
-enum ipa_seq_type {
- IPA_SEQ_DMA_ONLY = 0x0000,
- IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002,
- IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
- IPA_SEQ_DMA_DEC = 0x0011,
- IPA_SEQ_DMA_COMP_DECOMP = 0x0020,
- IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
- IPA_SEQ_INVALID = 0xffff,
-};
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
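For reference, ipa_aggr_granularity_val() converts a period in microseconds into AGGR_GRANULARITY ticks of the 32 kHz timer, less one. Assuming the driver's usual 500 microsecond aggregation granularity:

	500 us * 32000 Hz / 1000000 = 16 ticks  ->  field value 16 - 1 = 15

which comfortably fits the 5-bit field (field_max is 31).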
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index b3790aa952a1..32e2d3e052d5 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -422,8 +422,8 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
- val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;
+ val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
+ val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
ipa_cmd_register_write_add(trans, offset, val, val, false);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index b382d47bc70d..dee58a6596d4 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -86,32 +86,32 @@ struct ipa_uc_mem_area {
/** enum ipa_uc_command - commands from the AP to the microcontroller */
enum ipa_uc_command {
- IPA_UC_COMMAND_NO_OP = 0,
- IPA_UC_COMMAND_UPDATE_FLAGS = 1,
- IPA_UC_COMMAND_DEBUG_RUN_TEST = 2,
- IPA_UC_COMMAND_DEBUG_GET_INFO = 3,
- IPA_UC_COMMAND_ERR_FATAL = 4,
- IPA_UC_COMMAND_CLK_GATE = 5,
- IPA_UC_COMMAND_CLK_UNGATE = 6,
- IPA_UC_COMMAND_MEMCPY = 7,
- IPA_UC_COMMAND_RESET_PIPE = 8,
- IPA_UC_COMMAND_REG_WRITE = 9,
- IPA_UC_COMMAND_GSI_CH_EMPTY = 10,
+ IPA_UC_COMMAND_NO_OP = 0x0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 0x1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 0x2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 0x3,
+ IPA_UC_COMMAND_ERR_FATAL = 0x4,
+ IPA_UC_COMMAND_CLK_GATE = 0x5,
+ IPA_UC_COMMAND_CLK_UNGATE = 0x6,
+ IPA_UC_COMMAND_MEMCPY = 0x7,
+ IPA_UC_COMMAND_RESET_PIPE = 0x8,
+ IPA_UC_COMMAND_REG_WRITE = 0x9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 0xa,
};
/** enum ipa_uc_response - microcontroller response codes */
enum ipa_uc_response {
- IPA_UC_RESPONSE_NO_OP = 0,
- IPA_UC_RESPONSE_INIT_COMPLETED = 1,
- IPA_UC_RESPONSE_CMD_COMPLETED = 2,
- IPA_UC_RESPONSE_DEBUG_GET_INFO = 3,
+ IPA_UC_RESPONSE_NO_OP = 0x0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 0x1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 0x2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 0x3,
};
/** enum ipa_uc_event - common cpu events reported by the microcontroller */
enum ipa_uc_event {
- IPA_UC_EVENT_NO_OP = 0,
- IPA_UC_EVENT_ERROR = 1,
- IPA_UC_EVENT_LOG_INFO = 2,
+ IPA_UC_EVENT_NO_OP = 0x0,
+ IPA_UC_EVENT_ERROR = 0x1,
+ IPA_UC_EVENT_LOG_INFO = 0x2,
};
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
@@ -129,9 +129,10 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
- else
+ else if (shared->event != IPA_UC_EVENT_LOG_INFO)
dev_err(dev, "unsupported microcontroller event %hhu\n",
shared->event);
+ /* The LOG_INFO event can be safely ignored */
}
/* Microcontroller response IPA interrupt handler */
@@ -191,14 +192,19 @@ void ipa_uc_teardown(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ u32 val;
+ /* Fill in the command data */
shared->command = command;
shared->command_param = cpu_to_le32(command_param);
shared->command_param_hi = 0;
shared->response = 0;
shared->response_param = 0;
- iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+ /* Use an interrupt to tell the microcontroller the command is ready */
+ val = u32_encode_bits(1, UC_INTR_FMASK);
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 11ca5fa902a1..92425e1fd70c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -101,6 +101,7 @@ struct pcpu_secy_stats {
* @real_dev: pointer to underlying netdevice
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
+ * @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
*/
struct macsec_dev {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index c8d803d3616c..d9b6c44a5911 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1096,7 +1096,7 @@ static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *real_dev = vlan->lowerdev;
struct netpoll *netpoll;
- int err = 0;
+ int err;
netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
err = -ENOMEM;
@@ -1339,7 +1339,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-/**
+/*
* reconfigure list of remote source mac address
* (only for macvlan devices in source mode)
* Note regarding alignment: all netlink data is aligned to 4 Byte, which
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
new file mode 100644
index 000000000000..d3f92781314e
--- /dev/null
+++ b/drivers/net/mhi_net.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI Network driver - Network over MHI bus
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+#define MHI_NET_DEFAULT_MTU 0x4000
+
+struct mhi_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ atomic_t rx_queued;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_net_dev {
+ struct mhi_device *mdev;
+ struct net_device *ndev;
+ struct delayed_work rx_refill;
+ struct mhi_net_stats stats;
+ u32 rx_queue_sz;
+};
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ /* Feed the rx buffer pool */
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+ cancel_delayed_work_sync(&mhi_netdev->rx_refill);
+
+ return 0;
+}
+
+static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int err;
+
+ err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
+ ndev->name, err);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ /* drop the packet */
+ dev_kfree_skb_any(skb);
+ }
+
+ if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
+ stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_netdev_ops;
+ ndev->mtu = MHI_NET_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ int remaining;
+
+ remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+
+ if (unlikely(mhi_res->transaction_status)) {
+ dev_kfree_skb_any(skb);
+
+ /* MHI layer stopping/resetting the DL channel */
+ if (mhi_res->transaction_status == -ENOTCONN)
+ return;
+
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+ } else {
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+
+ skb->protocol = htons(ETH_P_MAP);
+ skb_put(skb, mhi_res->bytes_xferd);
+ netif_rx(skb);
+ }
+
+ /* Refill if RX buffers queue becomes low */
+ if (remaining <= mhi_netdev->rx_queue_sz / 2)
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+}
+
+static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct sk_buff *skb = mhi_res->buf_addr;
+
+ /* Hardware has consumed the buffer, so free the skb (which is not
+ * freed by the MHI stack) and perform accounting.
+ */
+ dev_consume_skb_any(skb);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ if (unlikely(mhi_res->transaction_status)) {
+
+ /* MHI layer stopping/resetting the UL channel */
+ if (mhi_res->transaction_status == -ENOTCONN) {
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+ return;
+ }
+
+ u64_stats_inc(&mhi_netdev->stats.tx_errors);
+ } else {
+ u64_stats_inc(&mhi_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
+ }
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static void mhi_net_rx_refill_work(struct work_struct *work)
+{
+ struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
+ rx_refill.work);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int size = READ_ONCE(ndev->mtu);
+ struct sk_buff *skb;
+ int err;
+
+ while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb))
+ break;
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
+ ndev->name, err);
+ kfree_skb(skb);
+ break;
+ }
+
+ atomic_inc(&mhi_netdev->stats.rx_queued);
+
+ /* Do not hog the CPU if rx buffers are consumed faster than
+ * queued (unlikely).
+ */
+ cond_resched();
+ }
+
+ /* If we're still starved of rx buffers, reschedule later */
+ if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+ schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
+}
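
[Note: this hunk never primes the RX ring at bring-up; presumably the .ndo_open handler (not shown here) just kicks the same refill work. A minimal sketch under that assumption:]

static int mhi_ndo_open_sketch(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the RX ring first, then let the stack transmit */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);
	netif_start_queue(ndev);
	return 0;
}
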
+
+static int mhi_net_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ const char *netname = (char *)id->driver_data;
+ struct device *dev = &mhi_dev->dev;
+ struct mhi_net_dev *mhi_netdev;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
+ mhi_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ mhi_netdev = netdev_priv(ndev);
+ dev_set_drvdata(dev, mhi_netdev);
+ mhi_netdev->ndev = ndev;
+ mhi_netdev->mdev = mhi_dev;
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ /* All MHI net channels have 128 ring elements (at least for now) */
+ mhi_netdev->rx_queue_sz = 128;
+
+ INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
+ u64_stats_init(&mhi_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_netdev->stats.tx_syncp);
+
+ /* Start MHI channels */
+ err = mhi_prepare_for_transfer(mhi_dev);
+ if (err)
+ goto out_err;
+
+ err = register_netdev(ndev);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ free_netdev(ndev);
+ return err;
+}
+
+static void mhi_net_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ unregister_netdev(mhi_netdev->ndev);
+
+ mhi_unprepare_from_transfer(mhi_netdev->mdev);
+
+ free_netdev(mhi_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_net_id_table[] = {
+ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
+
+static struct mhi_driver mhi_net_driver = {
+ .probe = mhi_net_probe,
+ .remove = mhi_net_remove,
+ .dl_xfer_cb = mhi_net_dl_callback,
+ .ul_xfer_cb = mhi_net_ul_callback,
+ .id_table = mhi_net_id_table,
+ .driver = {
+ .name = "mhi_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_driver(mhi_net_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Network over MHI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index f6a97c859f3a..e71ebb933266 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -84,15 +84,16 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
}
+
+ ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
+ if (mii->supports_gmii)
+ ecmd->advertising |=
+ mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
+
if (bmcr & BMCR_ANENABLE) {
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (mii->supports_gmii)
- ecmd->advertising |=
- mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
-
if (bmsr & BMSR_ANEGCOMPLETE) {
ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
ecmd->lp_advertising |=
@@ -171,14 +172,15 @@ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
}
+
+ advertising |= mii_get_an(mii, MII_ADVERTISE);
+ if (mii->supports_gmii)
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
+
if (bmcr & BMCR_ANENABLE) {
advertising |= ADVERTISED_Autoneg;
cmd->base.autoneg = AUTONEG_ENABLE;
- advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (mii->supports_gmii)
- advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
-
if (bmsr & BMSR_ANEGCOMPLETE) {
lp_advertising = mii_get_an(mii, MII_LPA);
lp_advertising |=
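
[Note: both mii.c hunks move the advertisement decoding ahead of the BMCR_ANENABLE check, so the advertised modes are now reported even when autoneg is disabled. The gigabit half relies on mii_ctrl1000_to_ethtool_adv_t(); functionally that helper amounts to roughly the following translation (a sketch, not the actual mii.h inline):]

static u32 ctrl1000_to_ethtool_adv_sketch(u32 ctrl1000)
{
	u32 adv = 0;

	if (ctrl1000 & ADVERTISE_1000HALF)
		adv |= ADVERTISED_1000baseT_Half;
	if (ctrl1000 & ADVERTISE_1000FULL)
		adv |= ADVERTISED_1000baseT_Full;
	return adv;
}
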
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index fb182bec8f06..2a4892402ed8 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -697,7 +697,7 @@ static struct failover_ops net_failover_ops = {
/**
* net_failover_create - Create and register a failover instance
*
- * @dev: standby netdev
+ * @standby_dev: standby netdev
*
* Creates a failover netdev and registers a failover instance for a standby
* netdev. Used by paravirtual drivers that use 3-netdev model.
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 92001f7af380..ccecba908ded 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -83,6 +83,7 @@ static struct console netconsole_ext;
* whether the corresponding netpoll is active or inactive.
* Also, other parameters of a target may be modified at
* runtime only when it is disabled (enabled == 0).
+ * @extended: Denotes whether console is extended or not.
* @np: The netpoll structure for this target.
* Contains the other userspace visible parameters:
* dev_name (read-write)
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index e7972e88ffe0..fe48817b3985 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -326,6 +326,12 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
+ /* Resources for nexthops */
+ err = devlink_resource_register(devlink, "nexthops", (u64)-1,
+ NSIM_RESOURCE_NEXTHOPS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+
out:
return err;
}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index deea17a0e79c..45d8a7790bd5 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -25,6 +25,7 @@
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/net_namespace.h>
+#include <net/nexthop.h>
#include "netdevsim.h"
@@ -42,9 +43,12 @@ struct nsim_fib_data {
struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
+ struct nsim_fib_entry nexthops;
struct rhashtable fib_rt_ht;
struct list_head fib_rt_list;
spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct notifier_block nexthop_nb;
+ struct rhashtable nexthop_ht;
struct devlink *devlink;
};
@@ -86,6 +90,19 @@ static const struct rhashtable_params nsim_fib_rt_ht_params = {
.automatic_shrinking = true,
};
+struct nsim_nexthop {
+ struct rhash_head ht_node;
+ u64 occ;
+ u32 id;
+};
+
+static const struct rhashtable_params nsim_nexthop_ht_params = {
+ .key_offset = offsetof(struct nsim_nexthop, id),
+ .head_offset = offsetof(struct nsim_nexthop, ht_node),
+ .key_len = sizeof(u32),
+ .automatic_shrinking = true,
+};
+
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, bool max)
{
@@ -104,6 +121,9 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
case NSIM_RESOURCE_IPV6_FIB_RULES:
entry = &fib_data->ipv6.rules;
break;
+ case NSIM_RESOURCE_NEXTHOPS:
+ entry = &fib_data->nexthops;
+ break;
default:
return 0;
}
@@ -129,6 +149,9 @@ static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
case NSIM_RESOURCE_IPV6_FIB_RULES:
entry = &fib_data->ipv6.rules;
break;
+ case NSIM_RESOURCE_NEXTHOPS:
+ entry = &fib_data->nexthops;
+ break;
default:
WARN_ON(1);
return;
@@ -389,11 +412,6 @@ static int nsim_fib4_event(struct nsim_fib_data *data,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
- if (fen_info->fi->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
- return 0;
- }
-
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib4_rt_insert(data, fen_info);
@@ -704,11 +722,6 @@ static int nsim_fib6_event(struct nsim_fib_data *data,
fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
- if (fen6_info->rt->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
- return 0;
- }
-
if (fen6_info->rt->fib6_src.plen) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
return 0;
@@ -838,6 +851,196 @@ static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
data->ipv6.rules.num = 0ULL;
}
+static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ struct nsim_nexthop *nexthop;
+ u64 occ = 0;
+ int i;
+
+ nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL);
+ if (!nexthop)
+ return NULL;
+
+ nexthop->id = info->id;
+
+ /* Determine the number of nexthop entries the new nexthop will
+ * occupy.
+ */
+
+ if (!info->is_grp) {
+ occ = 1;
+ goto out;
+ }
+
+ for (i = 0; i < info->nh_grp->num_nh; i++)
+ occ += info->nh_grp->nh_entries[i].weight;
+
+out:
+ nexthop->occ = occ;
+ return nexthop;
+}
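
[Note: for example, a nexthop group with two entries of weights 3 and 5 occupies 3 + 5 = 8 entries, while a non-group nexthop always occupies exactly 1.]
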
+
+static void nsim_nexthop_destroy(struct nsim_nexthop *nexthop)
+{
+ kfree(nexthop);
+}
+
+static int nsim_nexthop_account(struct nsim_fib_data *data, u64 occ,
+ bool add, struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (add) {
+ if (data->nexthops.num + occ <= data->nexthops.max) {
+ data->nexthops.num += occ;
+ } else {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops");
+ }
+ } else {
+ if (WARN_ON(occ > data->nexthops.num))
+ return -EINVAL;
+ data->nexthops.num -= occ;
+ }
+
+ return err;
+}
+
+static int nsim_nexthop_add(struct nsim_fib_data *data,
+ struct nsim_nexthop *nexthop,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_nexthop_account(data, nexthop->occ, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->nexthop_ht, &nexthop->ht_node,
+ nsim_nexthop_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert nexthop");
+ goto err_nexthop_dismiss;
+ }
+
+ nexthop_set_hw_flags(net, nexthop->id, false, true);
+
+ return 0;
+
+err_nexthop_dismiss:
+ nsim_nexthop_account(data, nexthop->occ, false, extack);
+ return err;
+}
+
+static int nsim_nexthop_replace(struct nsim_fib_data *data,
+ struct nsim_nexthop *nexthop,
+ struct nsim_nexthop *nexthop_old,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_nexthop_account(data, nexthop->occ, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_replace_fast(&data->nexthop_ht,
+ &nexthop_old->ht_node, &nexthop->ht_node,
+ nsim_nexthop_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace nexthop");
+ goto err_nexthop_dismiss;
+ }
+
+ nexthop_set_hw_flags(net, nexthop->id, false, true);
+ nsim_nexthop_account(data, nexthop_old->occ, false, extack);
+ nsim_nexthop_destroy(nexthop_old);
+
+ return 0;
+
+err_nexthop_dismiss:
+ nsim_nexthop_account(data, nexthop->occ, false, extack);
+ return err;
+}
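
[Note: the accounting order here matters: the new occupancy is charged before the old one is released, so both are counted transiently and a replace can fail with -ENOSPC even when the net change would have fit.]
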
+
+static int nsim_nexthop_insert(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ struct nsim_nexthop *nexthop, *nexthop_old;
+ int err;
+
+ nexthop = nsim_nexthop_create(data, info);
+ if (!nexthop)
+ return -ENOMEM;
+
+ nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
+ nsim_nexthop_ht_params);
+ if (!nexthop_old)
+ err = nsim_nexthop_add(data, nexthop, info->extack);
+ else
+ err = nsim_nexthop_replace(data, nexthop, nexthop_old,
+ info->extack);
+
+ if (err)
+ nsim_nexthop_destroy(nexthop);
+
+ return err;
+}
+
+static void nsim_nexthop_remove(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ struct nsim_nexthop *nexthop;
+
+ nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
+ nsim_nexthop_ht_params);
+ if (!nexthop)
+ return;
+
+ rhashtable_remove_fast(&data->nexthop_ht, &nexthop->ht_node,
+ nsim_nexthop_ht_params);
+ nsim_nexthop_account(data, nexthop->occ, false, info->extack);
+ nsim_nexthop_destroy(nexthop);
+}
+
+static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ nexthop_nb);
+ struct nh_notifier_info *info = ptr;
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ switch (event) {
+ case NEXTHOP_EVENT_REPLACE:
+ err = nsim_nexthop_insert(data, info);
+ break;
+ case NEXTHOP_EVENT_DEL:
+ nsim_nexthop_remove(data, info);
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
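
[Note: notifier_from_errno() folds a negative errno into the notifier return value (setting NOTIFY_STOP_MASK) so the caller can recover it; sketched below under the assumption that the nexthop core uses the standard helper pair from <linux/notifier.h>:]

#include <linux/notifier.h>

/* Sketch: how a caller of a notifier chain recovers the errno encoded
 * by notifier_from_errno() above.
 */
static int call_chain_sketch(struct blocking_notifier_head *nh,
			     unsigned long event, void *info)
{
	int ret = blocking_notifier_call_chain(nh, event, info);

	return notifier_to_errno(ret);	/* 0 on NOTIFY_OK/DONE, else the -errno */
}
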
+
+static void nsim_nexthop_free(void *ptr, void *arg)
+{
+ struct nsim_nexthop *nexthop = ptr;
+ struct nsim_fib_data *data = arg;
+ struct net *net;
+
+ net = devlink_net(data->devlink);
+ nexthop_set_hw_flags(net, nexthop->id, false, false);
+ nsim_nexthop_account(data, nexthop->occ, false, NULL);
+ nsim_nexthop_destroy(nexthop);
+}
+
static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
@@ -866,12 +1069,20 @@ static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
+static u64 nsim_fib_nexthops_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_NEXTHOPS, false);
+}
+
static void nsim_fib_set_max_all(struct nsim_fib_data *data,
struct devlink *devlink)
{
enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_NEXTHOPS,
};
int i;
@@ -897,20 +1108,32 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
return ERR_PTR(-ENOMEM);
data->devlink = devlink;
+ err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params);
+ if (err)
+ goto err_data_free;
+
spin_lock_init(&data->fib_lock);
INIT_LIST_HEAD(&data->fib_rt_list);
err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
if (err)
- goto err_data_free;
+ goto err_rhashtable_nexthop_destroy;
nsim_fib_set_max_all(data, devlink);
+ data->nexthop_nb.notifier_call = nsim_nexthop_event_nb;
+ err = register_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb,
+ extack);
+ if (err) {
+ pr_err("Failed to register nexthop notifier\n");
+ goto err_rhashtable_fib_destroy;
+ }
+
data->fib_nb.notifier_call = nsim_fib_event_nb;
err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
nsim_fib_dump_inconsistent, extack);
if (err) {
pr_err("Failed to register fib notifier\n");
- goto err_rhashtable_destroy;
+ goto err_nexthop_nb_unregister;
}
devlink_resource_occ_get_register(devlink,
@@ -929,11 +1152,20 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
NSIM_RESOURCE_IPV6_FIB_RULES,
nsim_fib_ipv6_rules_res_occ_get,
data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_NEXTHOPS,
+ nsim_fib_nexthops_res_occ_get,
+ data);
return data;
-err_rhashtable_destroy:
+err_nexthop_nb_unregister:
+ unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
+err_rhashtable_fib_destroy:
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
+err_rhashtable_nexthop_destroy:
+ rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
+ data);
err_data_free:
kfree(data);
return ERR_PTR(err);
@@ -942,6 +1174,8 @@ err_data_free:
void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
{
devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_NEXTHOPS);
+ devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV6_FIB_RULES);
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV6_FIB);
@@ -950,8 +1184,11 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
+ rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
+ data);
WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
kfree(data);
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 827fc80f50a0..698be048041b 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -158,6 +158,7 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_NEXTHOPS,
};
struct nsim_dev_health {
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 307f0ac1287b..55a0b91816e2 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/ethtool_netlink.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -23,6 +24,7 @@
#define ADIN1300_PHY_CTRL1 0x0012
#define ADIN1300_AUTO_MDI_EN BIT(10)
#define ADIN1300_MAN_MDIX_EN BIT(9)
+#define ADIN1300_DIAG_CLK_EN BIT(2)
#define ADIN1300_RX_ERR_CNT 0x0014
@@ -69,6 +71,31 @@
#define ADIN1300_CLOCK_STOP_REG 0x9400
#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
+#define ADIN1300_CDIAG_RUN 0xba1b
+#define ADIN1300_CDIAG_RUN_EN BIT(0)
+
+/*
+ * The XSIM3/2/1 and XSHRT3/2/1 bit names are relative to the pair a
+ * given result register describes:
+ * For CDIAG_DTLD_RSLTS(0) it's ADIN1300_CDIAG_RSLT_XSIM3/2/1
+ * For CDIAG_DTLD_RSLTS(1) it's ADIN1300_CDIAG_RSLT_XSIM3/2/0
+ * For CDIAG_DTLD_RSLTS(2) it's ADIN1300_CDIAG_RSLT_XSIM3/1/0
+ * For CDIAG_DTLD_RSLTS(3) it's ADIN1300_CDIAG_RSLT_XSIM2/1/0
+ */
+#define ADIN1300_CDIAG_DTLD_RSLTS(x) (0xba1d + (x))
+#define ADIN1300_CDIAG_RSLT_BUSY BIT(10)
+#define ADIN1300_CDIAG_RSLT_XSIM3 BIT(9)
+#define ADIN1300_CDIAG_RSLT_XSIM2 BIT(8)
+#define ADIN1300_CDIAG_RSLT_XSIM1 BIT(7)
+#define ADIN1300_CDIAG_RSLT_SIM BIT(6)
+#define ADIN1300_CDIAG_RSLT_XSHRT3 BIT(5)
+#define ADIN1300_CDIAG_RSLT_XSHRT2 BIT(4)
+#define ADIN1300_CDIAG_RSLT_XSHRT1 BIT(3)
+#define ADIN1300_CDIAG_RSLT_SHRT BIT(2)
+#define ADIN1300_CDIAG_RSLT_OPEN BIT(1)
+#define ADIN1300_CDIAG_RSLT_GOOD BIT(0)
+
+#define ADIN1300_CDIAG_FLT_DIST(x) (0xba21 + (x))
+
#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
#define ADIN1300_GE_SOFT_RESET BIT(0)
@@ -321,10 +348,9 @@ static int adin_set_downshift(struct phy_device *phydev, u8 cnt)
return -E2BIG;
val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
- val |= ADIN1300_LINKING_EN;
rc = phy_modify(phydev, ADIN1300_PHY_CTRL3,
- ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK,
+ ADIN1300_DOWNSPEED_RETRIES_MSK,
val);
if (rc < 0)
return rc;
@@ -445,12 +471,43 @@ static int adin_phy_ack_intr(struct phy_device *phydev)
static int adin_phy_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- return phy_set_bits(phydev, ADIN1300_INT_MASK_REG,
- ADIN1300_INT_MASK_EN);
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = adin_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
+ err = phy_set_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+ } else {
+ err = phy_clear_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+ if (err)
+ return err;
+
+ err = adin_phy_ack_intr(phydev);
+ }
+
+ return err;
+}
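
[Note: the ack-then-enable / disable-then-ack ordering is deliberate: clearing the latched status before unmasking prevents a stale event from firing immediately, and clearing it after masking avoids leaving the interrupt line asserted. The same ordering is applied throughout this series.]
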
+
+static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, ADIN1300_INT_STATUS_REG);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
- return phy_clear_bits(phydev, ADIN1300_INT_MASK_REG,
- ADIN1300_INT_MASK_EN);
+ if (!(irq_status & ADIN1300_INT_LINK_STAT_CHNG_EN))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
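
[Note: this read-status / filter-by-enabled-sources / phy_trigger_machine() shape is the generic .handle_interrupt() contract replacing the old .ack_interrupt()/.did_interrupt() pair, and it recurs essentially unchanged in every PHY driver converted below.]
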
static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
@@ -555,6 +612,14 @@ static int adin_config_aneg(struct phy_device *phydev)
{
int ret;
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN);
+ if (ret < 0)
+ return ret;
+
ret = adin_config_mdix(phydev);
if (ret)
return ret;
@@ -725,10 +790,117 @@ static int adin_probe(struct phy_device *phydev)
return 0;
}
+static int adin_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN);
+ if (ret < 0)
+ return ret;
+
+ /* wait a bit for the clock to stabilize */
+ msleep(50);
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN,
+ ADIN1300_CDIAG_RUN_EN);
+}
+
+static int adin_cable_test_report_trans(int result)
+{
+ int mask;
+
+ if (result & ADIN1300_CDIAG_RSLT_GOOD)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ if (result & ADIN1300_CDIAG_RSLT_OPEN)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+
+ /* short with other pairs */
+ mask = ADIN1300_CDIAG_RSLT_XSHRT3 |
+ ADIN1300_CDIAG_RSLT_XSHRT2 |
+ ADIN1300_CDIAG_RSLT_XSHRT1;
+ if (result & mask)
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+
+ if (result & ADIN1300_CDIAG_RSLT_SHRT)
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+}
+
+static int adin_cable_test_report_pair(struct phy_device *phydev,
+ unsigned int pair)
+{
+ int fault_rslt;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_CDIAG_DTLD_RSLTS(pair));
+ if (ret < 0)
+ return ret;
+
+ fault_rslt = adin_cable_test_report_trans(ret);
+
+ ret = ethnl_cable_test_result(phydev, pair, fault_rslt);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_CDIAG_FLT_DIST(pair));
+ if (ret < 0)
+ return ret;
+
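+ /* ADIN1300_CDIAG_FLT_DIST presumably reports meters; ethnl takes the
+ * fault length in centimeters, hence the "ret * 100" below.
+ */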
+ switch (fault_rslt) {
+ case ETHTOOL_A_CABLE_RESULT_CODE_OPEN:
+ case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT:
+ case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT:
+ return ethnl_cable_test_fault_length(phydev, pair, ret * 100);
+ default:
+ return 0;
+ }
+}
+
+static int adin_cable_test_report(struct phy_device *phydev)
+{
+ unsigned int pair;
+ int ret;
+
+ for (pair = ETHTOOL_A_CABLE_PAIR_A; pair <= ETHTOOL_A_CABLE_PAIR_D; pair++) {
+ ret = adin_cable_test_report_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adin_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ADIN1300_CDIAG_RUN_EN)
+ return 0;
+
+ *finished = true;
+
+ return adin_cable_test_report(phydev);
+}
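
[Note: with .flags = PHY_POLL_CABLE_TEST set in the driver entries below, phylib repeatedly polls cable_test_get_status() after cable_test_start() until *finished is set, then reports the per-pair results over ethtool netlink; from userspace this is driven by something like ethtool --cable-test DEVNAME.]
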
+
static struct phy_driver adin_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200),
.name = "ADIN1200",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = adin_probe,
.config_init = adin_config_init,
.soft_reset = adin_soft_reset,
@@ -736,8 +908,8 @@ static struct phy_driver adin_driver[] = {
.read_status = adin_read_status,
.get_tunable = adin_get_tunable,
.set_tunable = adin_set_tunable,
- .ack_interrupt = adin_phy_ack_intr,
.config_intr = adin_phy_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.get_sset_count = adin_get_sset_count,
.get_strings = adin_get_strings,
.get_stats = adin_get_stats,
@@ -745,10 +917,13 @@ static struct phy_driver adin_driver[] = {
.suspend = genphy_suspend,
.read_mmd = adin_read_mmd,
.write_mmd = adin_write_mmd,
+ .cable_test_start = adin_cable_test_start,
+ .cable_test_get_status = adin_cable_test_get_status,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300),
.name = "ADIN1300",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = adin_probe,
.config_init = adin_config_init,
.soft_reset = adin_soft_reset,
@@ -756,8 +931,8 @@ static struct phy_driver adin_driver[] = {
.read_status = adin_read_status,
.get_tunable = adin_get_tunable,
.set_tunable = adin_set_tunable,
- .ack_interrupt = adin_phy_ack_intr,
.config_intr = adin_phy_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.get_sset_count = adin_get_sset_count,
.get_strings = adin_get_strings,
.get_stats = adin_get_stats,
@@ -765,6 +940,8 @@ static struct phy_driver adin_driver[] = {
.suspend = genphy_suspend,
.read_mmd = adin_read_mmd,
.write_mmd = adin_write_mmd,
+ .cable_test_start = adin_cable_test_start,
+ .cable_test_get_status = adin_cable_test_get_status,
},
};
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index eef35f8c8d45..001bb6d8bfce 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -20,6 +20,10 @@
#define MII_AM79C_IR_EN_ANEG 0x0100 /* IR enable Aneg Complete */
#define MII_AM79C_IR_IMASK_INIT (MII_AM79C_IR_EN_LINK | MII_AM79C_IR_EN_ANEG)
+#define MII_AM79C_IR_LINK_DOWN BIT(2)
+#define MII_AM79C_IR_ANEG_DONE BIT(0)
+#define MII_AM79C_IR_IMASK_STAT (MII_AM79C_IR_LINK_DOWN | MII_AM79C_IR_ANEG_DONE)
+
MODULE_DESCRIPTION("AMD PHY driver");
MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
MODULE_LICENSE("GPL");
@@ -48,22 +52,49 @@ static int am79c_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = am79c_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_AM79C_IR, MII_AM79C_IR_IMASK_INIT);
- else
+ } else {
err = phy_write(phydev, MII_AM79C_IR, 0);
+ if (err)
+ return err;
+
+ err = am79c_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t am79c_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_AM79C_IR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_AM79C_IR_IMASK_STAT))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static struct phy_driver am79c_driver[] = { {
.phy_id = PHY_ID_AM79C874,
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.config_init = am79c_config_init,
- .ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
+ .handle_interrupt = am79c_handle_interrupt,
} };
module_phy_driver(am79c_driver);
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 41e7c1432497..968dd43a2b1e 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -52,6 +52,7 @@
#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
+#define MDIO_AN_TX_VEND_INT_STATUS2_MASK BIT(0)
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
@@ -246,6 +247,13 @@ static int aqr_config_intr(struct phy_device *phydev)
bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
int err;
+ if (en) {
+ /* Clear any pending interrupts before enabling them */
+ err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+ if (err < 0)
+ return err;
+ }
+
err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
if (err < 0)
@@ -256,18 +264,39 @@ static int aqr_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
- en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
- VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+ en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+ if (err < 0)
+ return err;
+
+ if (!en) {
+ /* Clear any pending interrupts after we have disabled them */
+ err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
-static int aqr_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
{
- int reg;
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_TX_VEND_INT_STATUS2);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- reg = phy_read_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_STATUS2);
- return (reg < 0) ? reg : 0;
+ return IRQ_HANDLED;
}
static int aqr_read_status(struct phy_device *phydev)
@@ -584,7 +613,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQ1202",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -592,7 +621,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQ2104",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -600,7 +629,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR105",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
.suspend = aqr107_suspend,
.resume = aqr107_resume,
@@ -610,7 +639,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR106",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -620,7 +649,7 @@ static struct phy_driver aqr_driver[] = {
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr107_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
@@ -638,7 +667,7 @@ static struct phy_driver aqr_driver[] = {
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr107_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
@@ -654,7 +683,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ed601a7e46a0..d0b36fd6c265 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -614,6 +614,11 @@ static int at803x_config_intr(struct phy_device *phydev)
value = phy_read(phydev, AT803X_INTR_ENABLE);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
@@ -621,13 +626,44 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- }
- else
+ } else {
err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static void at803x_link_change_notify(struct phy_device *phydev)
{
/*
@@ -1062,8 +1098,8 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
.cable_test_start = at803x_cable_test_start,
@@ -1082,8 +1118,8 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
}, {
/* Qualcomm Atheros AR8031/AR8033 */
PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
@@ -1100,8 +1136,8 @@ static struct phy_driver at803x_driver[] = {
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
.aneg_done = at803x_aneg_done,
- .ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
.cable_test_start = at803x_cable_test_start,
@@ -1120,8 +1156,8 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
.cable_test_get_status = at803x_cable_test_get_status,
}, {
@@ -1132,8 +1168,8 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
.cable_test_get_status = at803x_cable_test_get_status,
.read_status = at803x_read_status,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 9ccf28b0a04d..da8f7cb41b44 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -256,8 +256,8 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.name = "Broadcom Cygnus PHY",
/* PHY_GBIT_FEATURES */
.config_init = bcm_cygnus_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm_cygnus_resume,
}, {
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index ef6825b30323..53282a6d5928 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -181,21 +181,62 @@ EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);
int bcm_phy_config_intr(struct phy_device *phydev)
{
- int reg;
+ int reg, err;
reg = phy_read(phydev, MII_BCM54XX_ECR);
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BCM54XX_ECR_IM;
- else
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ } else {
reg |= MII_BCM54XX_ECR_IM;
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ if (err)
+ return err;
- return phy_write(phydev, MII_BCM54XX_ECR, reg);
+ err = bcm_phy_ack_intr(phydev);
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(bcm_phy_config_intr);
+irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_mask;
+
+ irq_status = phy_read(phydev, MII_BCM54XX_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* If a bit from the Interrupt Mask register is set, the corresponding
+ * bit from the Interrupt Status register is masked. So read the IMR
+ * and then flip the bits to get the list of possible interrupt
+ * sources.
+ */
+ irq_mask = phy_read(phydev, MII_BCM54XX_IMR);
+ if (irq_mask < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ irq_mask = ~irq_mask;
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_handle_interrupt);
+
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
{
phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 237a8503c9b4..c3842f87c33b 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -63,6 +63,7 @@ int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
int bcm_phy_ack_intr(struct phy_device *phydev);
int bcm_phy_config_intr(struct phy_device *phydev);
+irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev);
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down);
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 8998e68bb26b..d8f3024860dc 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -637,13 +637,29 @@ static int bcm54140_config_init(struct phy_device *phydev)
BCM54140_RDB_C_PWR_ISOLATE, 0);
}
-static int bcm54140_did_interrupt(struct phy_device *phydev)
+static irqreturn_t bcm54140_handle_interrupt(struct phy_device *phydev)
{
- int ret;
+ int irq_status, irq_mask;
+
+ irq_status = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_mask = bcm_phy_read_rdb(phydev, BCM54140_RDB_IMR);
+ if (irq_mask < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ irq_mask = ~irq_mask;
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
- ret = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ phy_trigger_machine(phydev);
- return (ret < 0) ? 0 : ret;
+ return IRQ_HANDLED;
}
static int bcm54140_ack_intr(struct phy_device *phydev)
@@ -665,7 +681,7 @@ static int bcm54140_config_intr(struct phy_device *phydev)
BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1,
BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3,
};
- int reg;
+ int reg, err;
if (priv->port >= ARRAY_SIZE(port_to_imr_bit))
return -EINVAL;
@@ -674,12 +690,23 @@ static int bcm54140_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm54140_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~port_to_imr_bit[priv->port];
- else
+ err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ } else {
reg |= port_to_imr_bit[priv->port];
+ err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ if (err)
+ return err;
+
+ err = bcm54140_ack_intr(phydev);
+ }
- return bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ return err;
}
static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data)
@@ -834,8 +861,7 @@ static struct phy_driver bcm54140_drivers[] = {
.flags = PHY_POLL_CABLE_TEST,
.features = PHY_GBIT_FEATURES,
.config_init = bcm54140_config_init,
- .did_interrupt = bcm54140_did_interrupt,
- .ack_interrupt = bcm54140_ack_intr,
+ .handle_interrupt = bcm54140_handle_interrupt,
.config_intr = bcm54140_config_intr,
.probe = bcm54140_probe,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 459fb2069c7e..0eb33be824f1 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -25,12 +25,22 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BCM63XX_IR_GMASK;
- else
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ } else {
reg |= MII_BCM63XX_IR_GMASK;
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ if (err)
+ return err;
+
+ err = bcm_phy_ack_intr(phydev);
+ }
- err = phy_write(phydev, MII_BCM63XX_IR, reg);
return err;
}
@@ -67,8 +77,8 @@ static struct phy_driver bcm63xx_driver[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
@@ -77,8 +87,8 @@ static struct phy_driver bcm63xx_driver[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
} };
module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index df360e1c5069..4ac8fd190e9d 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -144,35 +144,41 @@ static int bcm87xx_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ if (err)
+ return err;
+
reg |= 1;
- else
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ } else {
reg &= ~1;
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ }
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
return err;
}
-static int bcm87xx_did_interrupt(struct phy_device *phydev)
+static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev)
{
- int reg;
+ int irq_status;
- reg = phy_read(phydev, BCM87XX_LASI_STATUS);
-
- if (reg < 0) {
- phydev_err(phydev,
- "Error: Read of BCM87XX_LASI_STATUS failed: %d\n",
- reg);
- return 0;
+ irq_status = phy_read(phydev, BCM87XX_LASI_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
}
- return (reg & 1) != 0;
-}
-static int bcm87xx_ack_interrupt(struct phy_device *phydev)
-{
- /* Reading the LASI status clears it. */
- bcm87xx_did_interrupt(phydev);
- return 0;
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int bcm8706_match_phy_device(struct phy_device *phydev)
@@ -194,9 +200,8 @@ static struct phy_driver bcm87xx_driver[] = {
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
- .ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
- .did_interrupt = bcm87xx_did_interrupt,
+ .handle_interrupt = bcm87xx_handle_interrupt,
.match_phy_device = bcm8706_match_phy_device,
}, {
.phy_id = PHY_ID_BCM8727,
@@ -206,9 +211,8 @@ static struct phy_driver bcm87xx_driver[] = {
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
- .ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
- .did_interrupt = bcm87xx_did_interrupt,
+ .handle_interrupt = bcm87xx_handle_interrupt,
.match_phy_device = bcm8727_match_phy_device,
} };
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cd271de9609b..8a4ec3222168 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -634,15 +634,43 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = brcm_fet_ack_interrupt(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BRCM_FET_IR_MASK;
- else
+ err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
+ } else {
reg |= MII_BRCM_FET_IR_MASK;
+ err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
+ if (err)
+ return err;
+
+ err = brcm_fet_ack_interrupt(phydev);
+ }
- err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
return err;
}
+static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_BRCM_FET_INTREG);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
struct bcm53xx_phy_priv {
u64 *stats;
};
@@ -681,40 +709,40 @@ static struct phy_driver broadcom_drivers[] = {
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
@@ -722,8 +750,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.read_status = bcm54616s_read_status,
.probe = bcm54616s_probe,
}, {
@@ -732,8 +760,8 @@ static struct phy_driver broadcom_drivers[] = {
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -743,8 +771,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
@@ -752,8 +780,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
@@ -763,8 +791,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54811_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
@@ -774,48 +802,48 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
- .ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+ .handle_interrupt = brcm_fet_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
- .ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+ .handle_interrupt = brcm_fet_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5395,
.phy_id_mask = 0xfffffff0,
@@ -837,16 +865,16 @@ static struct phy_driver broadcom_drivers[] = {
.get_stats = bcm53xx_phy_get_stats,
.probe = bcm53xx_phy_probe,
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
} };
module_phy_driver(broadcom_drivers);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 9d1612a4d7e6..ef5f412e101f 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -87,15 +87,42 @@ static int cis820x_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = cis820x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_CIS8201_IMASK,
MII_CIS8201_IMASK_MASK);
- else
+ } else {
err = phy_write(phydev, MII_CIS8201_IMASK, 0);
+ if (err)
+ return err;
+
+ err = cis820x_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t cis820x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_CIS8201_ISTAT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_CIS8201_IMASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
/* Cicada 8201, a.k.a Vitesse VSC8201 */
static struct phy_driver cis820x_driver[] = {
{
@@ -104,16 +131,16 @@ static struct phy_driver cis820x_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
- .ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
+ .handle_interrupt = &cis820x_handle_interrupt,
}, {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
/* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
- .ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
+ .handle_interrupt = &cis820x_handle_interrupt,
} };
module_phy_driver(cis820x_driver);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 942f277463a4..a3b3842c67e5 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -47,6 +47,10 @@
#define MII_DM9161_INTR_STOP \
(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
| MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+#define MII_DM9161_INTR_CHANGE \
+ (MII_DM9161_INTR_DPLX_CHANGE | \
+ MII_DM9161_INTR_SPD_CHANGE | \
+ MII_DM9161_INTR_LINK_CHANGE)
/* DM9161 10BT Configuration/Status */
#define MII_DM9161_10BTCSR 0x12
@@ -57,24 +61,58 @@ MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+static int dm9161_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DM9161_INTR);
+
+ return (err < 0) ? err : 0;
+}
+
#define DM9161_DELAY 1
static int dm9161_config_intr(struct phy_device *phydev)
{
- int temp;
+ int temp, err;
temp = phy_read(phydev, MII_DM9161_INTR);
if (temp < 0)
return temp;
- if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dm9161_ack_interrupt(phydev);
+ if (err)
+ return err;
+
temp &= ~(MII_DM9161_INTR_STOP);
- else
+ err = phy_write(phydev, MII_DM9161_INTR, temp);
+ } else {
temp |= MII_DM9161_INTR_STOP;
+ err = phy_write(phydev, MII_DM9161_INTR, temp);
+ if (err)
+ return err;
+
+ err = dm9161_ack_interrupt(phydev);
+ }
+
+ return err;
+}
- temp = phy_write(phydev, MII_DM9161_INTR, temp);
+static irqreturn_t dm9161_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_DM9161_INTR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
- return temp;
+ if (!(irq_status & MII_DM9161_INTR_CHANGE))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int dm9161_config_aneg(struct phy_device *phydev)
@@ -132,13 +170,6 @@ static int dm9161_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
}
-static int dm9161_ack_interrupt(struct phy_device *phydev)
-{
- int err = phy_read(phydev, MII_DM9161_INTR);
-
- return (err < 0) ? err : 0;
-}
-
static struct phy_driver dm91xx_driver[] = {
{
.phy_id = 0x0181b880,
@@ -147,8 +178,8 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x0181b8b0,
.name = "Davicom DM9161B/C",
@@ -156,8 +187,8 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
@@ -165,15 +196,15 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
} };
module_phy_driver(dm91xx_driver);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index fec58ad69e02..0ee23d29c0d4 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -37,6 +37,8 @@
#define MII_LXT970_ISR 18 /* Interrupt Status Register */
+#define MII_LXT970_IRS_MINT BIT(15)
+
#define MII_LXT970_CONFIG 19 /* Configuration Register */
/* ------------------------------------------------------------------------- */
@@ -47,6 +49,7 @@
#define MII_LXT971_IER_IEN 0x00f2
#define MII_LXT971_ISR 19 /* Interrupt Status Register */
+#define MII_LXT971_ISR_MASK 0x00f0
/* register definitions for the 973 */
#define MII_LXT973_PCR 16 /* Port Configuration Register */
@@ -75,10 +78,50 @@ static int lxt970_ack_interrupt(struct phy_device *phydev)
static int lxt970_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
- else
- return phy_write(phydev, MII_LXT970_IER, 0);
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lxt970_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+ } else {
+ err = phy_write(phydev, MII_LXT970_IER, 0);
+ if (err)
+ return err;
+
+ err = lxt970_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t lxt970_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ /* The interrupt status register is cleared by reading BMSR
+ * followed by MII_LXT970_ISR
+ */
+ irq_status = phy_read(phydev, MII_BMSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_status = phy_read(phydev, MII_LXT970_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_LXT970_IRS_MINT))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int lxt970_config_init(struct phy_device *phydev)
@@ -99,10 +142,41 @@ static int lxt971_ack_interrupt(struct phy_device *phydev)
static int lxt971_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
- else
- return phy_write(phydev, MII_LXT971_IER, 0);
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lxt971_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+ } else {
+ err = phy_write(phydev, MII_LXT971_IER, 0);
+ if (err)
+ return err;
+
+ err = lxt971_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t lxt971_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_LXT971_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_LXT971_ISR_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
/*
@@ -237,15 +311,15 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.config_init = lxt970_config_init,
- .ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
+ .handle_interrupt = lxt970_handle_interrupt,
}, {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
+ .handle_interrupt = lxt971_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
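The reworked config_intr() callbacks also share a deliberate ordering: when enabling, any stale event is acked before the sources are unmasked; when disabling, the sources are masked first and only then acked. A generic sketch of that ordering, with MY_PHY_IER, MY_PHY_IER_EVENTS and my_phy_ack() as placeholders:

static int my_phy_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		/* ack first, so a stale event cannot fire the moment
		 * the sources are unmasked
		 */
		err = my_phy_ack(phydev);
		if (err)
			return err;

		err = phy_write(phydev, MY_PHY_IER, MY_PHY_IER_EVENTS);
	} else {
		/* mask first, so nothing new can latch, then ack */
		err = phy_write(phydev, MY_PHY_IER, 0);
		if (err)
			return err;

		err = my_phy_ack(phydev);
	}

	return err;
}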
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5aec673a0120..587930a7f48b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -80,8 +80,11 @@
#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
#define MII_M1111_HWCFG_MODE_RTBI 0x7
+#define MII_M1111_HWCFG_MODE_COPPER_1000X_AN 0x8
#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
+#define MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN 0xc
+#define MII_M1111_HWCFG_SERIAL_AN_BYPASS BIT(12)
#define MII_M1111_HWCFG_FIBER_COPPER_RES BIT(13)
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO BIT(15)
@@ -314,16 +317,43 @@ static int marvell_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = marvell_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_M1011_IMASK,
MII_M1011_IMASK_INIT);
- else
+ } else {
err = phy_write(phydev, MII_M1011_IMASK,
MII_M1011_IMASK_CLEAR);
+ if (err)
+ return err;
+
+ err = marvell_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t marvell_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_M1011_IEVENT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_M1011_IMASK_INIT))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int marvell_set_polarity(struct phy_device *phydev, int polarity)
{
int reg;
@@ -629,6 +659,51 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
return genphy_check_and_restart_aneg(phydev, changed);
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ int err;
+
+ if (extsr < 0)
+ return extsr;
+
+ /* If we're not using SGMII or 1000BaseX over copper, use the normal
+ * process; the steps below are only required for those modes.
+ */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ (extsr & MII_M1111_HWCFG_MODE_MASK) !=
+ MII_M1111_HWCFG_MODE_COPPER_1000X_AN)
+ return marvell_config_aneg(phydev);
+
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ goto error;
+
+ /* Configure the copper link first */
+ err = marvell_config_aneg(phydev);
+ if (err < 0)
+ goto error;
+
+ /* Do not touch the fiber page if we're in copper->sgmii mode */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ return 0;
+
+ /* Then the fiber link */
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
+ if (err < 0)
+ goto error;
+
+ err = marvell_config_aneg_fiber(phydev);
+ if (err < 0)
+ goto error;
+
+ return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+
+error:
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ return err;
+}
+
static int m88e1510_config_aneg(struct phy_device *phydev)
{
int err;
@@ -696,7 +771,7 @@ static void marvell_config_led(struct phy_device *phydev)
static int marvell_config_init(struct phy_device *phydev)
{
- /* Set defalut LED */
+ /* Set default LED */
marvell_config_led(phydev);
/* Set registers from marvell,reg-init DT property */
@@ -814,6 +889,28 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
MII_M1111_HWCFG_FIBER_COPPER_AUTO);
}
+static int m88e1111_config_init_1000basex(struct phy_device *phydev)
+{
+ int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ int err, mode;
+
+ if (extsr < 0)
+ return extsr;
+
+ /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */
+ mode = extsr & MII_M1111_HWCFG_MODE_MASK;
+ if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) {
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS,
+ MII_M1111_HWCFG_MODE_COPPER_1000X_AN |
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
@@ -836,6 +933,12 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ if (phydev->interface == PHY_INTERFACE_MODE_1000BASEX) {
+ err = m88e1111_config_init_1000basex(phydev);
+ if (err < 0)
+ return err;
+ }
+
err = marvell_of_reg_init(phydev);
if (err < 0)
return err;
@@ -1583,18 +1686,6 @@ static int marvell_aneg_done(struct phy_device *phydev)
return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
}
-static int m88e1121_did_interrupt(struct phy_device *phydev)
-{
- int imask;
-
- imask = phy_read(phydev, MII_M1011_IEVENT);
-
- if (imask & MII_M1011_IMASK_INIT)
- return 1;
-
- return 0;
-}
-
static void m88e1318_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -2621,8 +2712,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = marvell_config_init,
.config_aneg = m88e1101_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2639,8 +2730,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1111_config_init,
.config_aneg = marvell_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2658,10 +2749,31 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = m88e1111_config_init,
- .config_aneg = marvell_config_aneg,
+ .config_aneg = m88e1111_config_aneg,
+ .read_status = marvell_read_status,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E1111_FINISAR,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1111 (Finisar)",
+ /* PHY_GBIT_FEATURES */
+ .probe = marvell_probe,
+ .config_init = m88e1111_config_init,
+ .config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2680,8 +2792,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1118_config_init,
.config_aneg = m88e1118_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2699,9 +2811,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1121_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2721,9 +2832,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e1318_config_init,
.config_aneg = m88e1318_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
.resume = genphy_resume,
@@ -2743,8 +2853,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e1145_config_init,
.config_aneg = m88e1101_config_aneg,
.read_status = genphy_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2763,8 +2873,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1149_config_init,
.config_aneg = m88e1118_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2781,8 +2891,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1111_config_init,
.config_aneg = marvell_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2798,8 +2908,8 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = m88e1116r_config_init,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2820,9 +2930,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e1510_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
.resume = marvell_resume,
@@ -2849,9 +2958,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2875,9 +2983,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2900,9 +3007,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e3016_config_init,
.aneg_done = marvell_aneg_done,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2921,9 +3027,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e6390_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2946,9 +3051,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2968,9 +3072,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2989,6 +3092,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1121R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1145, MARVELL_PHY_ID_MASK },
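m88e1111_config_init_1000basex() above relies on phy_modify(), which performs a read-modify-write: bits in the mask argument are cleared, then bits in the set argument are ORed in. Roughly the open-coded equivalent (modulo locking, which phy_modify() holds across the whole sequence):

	int val = phy_read(phydev, MII_M1111_PHY_EXT_SR);

	if (val < 0)
		return val;

	/* clear the mode field and the AN-bypass bit ... */
	val &= ~(MII_M1111_HWCFG_MODE_MASK |
		 MII_M1111_HWCFG_SERIAL_AN_BYPASS);
	/* ... then select 1000BASE-X with AN, bypass allowed */
	val |= MII_M1111_HWCFG_MODE_COPPER_1000X_AN |
	       MII_M1111_HWCFG_SERIAL_AN_BYPASS;

	return phy_write(phydev, MII_M1111_PHY_EXT_SR, val);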
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 757e950fb745..e59067c64e97 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -472,7 +472,7 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
#endif
/**
- * mdiobus_create_device_from_board_info - create a full MDIO device given
+ * mdiobus_create_device - create a full MDIO device given
* a mdio_board_info structure
* @bus: MDIO bus to create the devices on
* @bi: mdio_board_info structure describing the devices
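This rename, like the ones in phy-c45.c, phy.c, phy_device.c and phylink.c below, only makes the kernel-doc header match the function it sits above; scripts/kernel-doc warns when the two diverge. The expected shape, for reference:

/**
 * function_name - short description (must name the actual function)
 * @arg1: description of the first parameter
 * @arg2: description of the second parameter
 *
 * Longer description, return value notes, etc.
 */
int function_name(struct foo *arg1, int arg2);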
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index a644e8e5071c..9f1f2b6c97d4 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -44,16 +44,32 @@ static int lan88xx_phy_config_intr(struct phy_device *phydev)
LAN88XX_INT_MASK_LINK_CHANGE_);
} else {
rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
+ if (rc)
+ return rc;
+
+ /* Ack interrupts after they have been disabled */
+ rc = phy_read(phydev, LAN88XX_INT_STS);
}
return rc < 0 ? rc : 0;
}
-static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
{
- int rc = phy_read(phydev, LAN88XX_INT_STS);
+ int irq_status;
- return rc < 0 ? rc : 0;
+ irq_status = phy_read(phydev, LAN88XX_INT_STS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int lan88xx_suspend(struct phy_device *phydev)
@@ -340,8 +356,8 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
- .ack_interrupt = lan88xx_phy_ack_interrupt,
.config_intr = lan88xx_phy_config_intr,
+ .handle_interrupt = lan88xx_handle_interrupt,
.suspend = lan88xx_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index fed3e395f18e..4dc00bd5a8d2 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -189,18 +189,34 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
- }
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ } else {
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc)
+ return rc;
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ }
return rc < 0 ? rc : 0;
}
-static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
- int rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ int irq_status;
- return rc < 0 ? rc : 0;
+ irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int lan87xx_config_init(struct phy_device *phydev)
@@ -219,10 +235,9 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
- .config_aneg = genphy_config_aneg,
- .ack_interrupt = lan87xx_phy_ack_interrupt,
.config_intr = lan87xx_phy_config_intr,
+ .handle_interrupt = lan87xx_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 6bc7406a1ce7..2f2157e3deab 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -1498,7 +1498,7 @@ static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
vsc8584_handle_macsec_interrupt(phydev);
if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG)
- phy_mac_interrupt(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
@@ -1541,16 +1541,6 @@ static int vsc85xx_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc8584_did_interrupt(struct phy_device *phydev)
-{
- int rc = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
-
- return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
-}
-
static int vsc8514_config_pre_init(struct phy_device *phydev)
{
/* These are the settings to override the silicon default
@@ -1933,6 +1923,10 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = vsc85xx_ack_interrupt(phydev);
+ if (rc)
+ return rc;
+
vsc8584_config_macsec_intr(phydev);
vsc8584_config_ts_intr(phydev);
@@ -1943,11 +1937,33 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
if (rc < 0)
return rc;
rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = vsc85xx_ack_interrupt(phydev);
}
return rc;
}
+static irqreturn_t vsc85xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_VSC85XX_INT_MASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int vsc85xx_config_aneg(struct phy_device *phydev)
{
int rc;
@@ -2114,7 +2130,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2139,9 +2155,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2163,7 +2178,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8514_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2187,7 +2202,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2211,7 +2226,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2235,7 +2250,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2259,7 +2274,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2283,9 +2298,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2308,9 +2322,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2333,9 +2346,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2359,9 +2370,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2386,9 +2396,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2411,9 +2419,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2436,9 +2442,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index b97ee79f3cdf..d8a61456d1ce 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -136,7 +136,7 @@ static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
- if (!cond || (cond && upper))
+ if (!cond || upper)
phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);
phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);
@@ -1510,6 +1510,8 @@ void vsc8584_config_ts_intr(struct phy_device *phydev)
int vsc8584_ptp_init(struct phy_device *phydev)
{
switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC8572:
+ case PHY_ID_VSC8574:
case PHY_ID_VSC8575:
case PHY_ID_VSC8582:
case PHY_ID_VSC8584:
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index a72fa0d2e7c7..afd7afa1f498 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -44,6 +44,9 @@
#define MII_CFG2_SLEEP_REQUEST_TO_16MS 0x3
#define MII_INTSRC 21
+#define MII_INTSRC_LINK_FAIL BIT(10)
+#define MII_INTSRC_LINK_UP BIT(9)
+#define MII_INTSRC_MASK (MII_INTSRC_LINK_FAIL | MII_INTSRC_LINK_UP)
#define MII_INTSRC_TEMP_ERR BIT(1)
#define MII_INTSRC_UV_ERR BIT(3)
@@ -597,11 +600,42 @@ static int tja11xx_ack_interrupt(struct phy_device *phydev)
static int tja11xx_config_intr(struct phy_device *phydev)
{
int value = 0;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = tja11xx_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP;
+ err = phy_write(phydev, MII_INTEN, value);
+ } else {
+ err = phy_write(phydev, MII_INTEN, value);
+ if (err)
+ return err;
+
+ err = tja11xx_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_INTSRC);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_INTSRC_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write(phydev, MII_INTEN, value);
+ return IRQ_HANDLED;
}
static int tja11xx_cable_test_start(struct phy_device *phydev)
@@ -747,8 +781,8 @@ static struct phy_driver tja11xx_driver[] = {
.get_sset_count = tja11xx_get_sset_count,
.get_strings = tja11xx_get_strings,
.get_stats = tja11xx_get_stats,
- .ack_interrupt = tja11xx_ack_interrupt,
.config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
}, {
@@ -770,8 +804,8 @@ static struct phy_driver tja11xx_driver[] = {
.get_sset_count = tja11xx_get_sset_count,
.get_strings = tja11xx_get_strings,
.get_stats = tja11xx_get_stats,
- .ack_interrupt = tja11xx_ack_interrupt,
.config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
}
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index bd11e62bfdfe..077f2929c45e 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,7 +9,7 @@
#include <linux/phy.h>
/**
- * genphy_c45_setup_forced - configures a forced speed
+ * genphy_c45_pma_setup_forced - configures a forced speed
* @phydev: target phy_device struct
*/
int genphy_c45_pma_setup_forced(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 35525a671400..dce86bad8231 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -489,14 +489,15 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
EXPORT_SYMBOL(phy_queue_state_machine);
/**
- * phy_queue_state_machine - Trigger the state machine to run now
+ * phy_trigger_machine - Trigger the state machine to run now
*
* @phydev: the phy_device struct
*/
-static void phy_trigger_machine(struct phy_device *phydev)
+void phy_trigger_machine(struct phy_device *phydev)
{
phy_queue_state_machine(phydev, 0);
}
+EXPORT_SYMBOL(phy_trigger_machine);
static void phy_abort_cable_test(struct phy_device *phydev)
{
@@ -924,7 +925,7 @@ void phy_stop_machine(struct phy_device *phydev)
* Must not be called from interrupt context, or while the
* phydev->lock is held.
*/
-static void phy_error(struct phy_device *phydev)
+void phy_error(struct phy_device *phydev)
{
WARN_ON(1);
@@ -934,6 +935,7 @@ static void phy_error(struct phy_device *phydev)
phy_trigger_machine(phydev);
}
+EXPORT_SYMBOL(phy_error);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5dab6be6fc38..81f672911305 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1156,7 +1156,7 @@ void phy_attached_info(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_attached_info);
-#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)"
+#define ATTACHED_FMT "attached PHY driver %s(mii_bus:phy_addr=%s, irq=%s)"
char *phy_attached_info_irq(struct phy_device *phydev)
{
char *irq_str;
@@ -1181,19 +1181,17 @@ EXPORT_SYMBOL(phy_attached_info_irq);
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
- const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+ const char *unbound = phydev->drv ? "" : "[unbound] ";
char *irq_str = phy_attached_info_irq(phydev);
if (!fmt) {
- phydev_info(phydev, ATTACHED_FMT "\n",
- drv_name, phydev_name(phydev),
- irq_str);
+ phydev_info(phydev, ATTACHED_FMT "\n", unbound,
+ phydev_name(phydev), irq_str);
} else {
va_list ap;
- phydev_info(phydev, ATTACHED_FMT,
- drv_name, phydev_name(phydev),
- irq_str);
+ phydev_info(phydev, ATTACHED_FMT, unbound,
+ phydev_name(phydev), irq_str);
va_start(ap, fmt);
vprintk(fmt, ap);
@@ -2463,6 +2461,19 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev)
+{
+ /* It seems there are cases where the interrupts are handled by another
+ * entity (i.e. an IRQ controller embedded inside the PHY) and do not
+ * need any other interaction from phylib. In this case, just trigger
+ * the state machine directly.
+ */
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(genphy_handle_interrupt_no_ack);
+
/**
* genphy_read_abilities - read PHY abilities from Clause 22 registers
* @phydev: target phy_device struct
@@ -2735,7 +2746,7 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
#endif
/**
- * phy_get_delay_index - returns the index of the internal delay
+ * phy_get_internal_delay - returns the index of the internal delay
* @phydev: phy_device struct
* @dev: pointer to the devices device struct
* @delay_values: array of delays the PHY supports
@@ -2815,7 +2826,7 @@ EXPORT_SYMBOL(phy_get_internal_delay);
static bool phy_drv_supports_irq(struct phy_driver *phydrv)
{
- return phydrv->config_intr && phydrv->ack_interrupt;
+ return phydrv->config_intr && (phydrv->ack_interrupt || phydrv->handle_interrupt);
}
/**
@@ -2947,6 +2958,13 @@ static int phy_remove(struct device *dev)
return 0;
}
+static void phy_shutdown(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ phy_disable_interrupts(phydev);
+}
+
/**
* phy_driver_register - register a phy_driver with the PHY layer
* @new_driver: new phy_driver to register
@@ -2970,6 +2988,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
new_driver->mdiodrv.driver.probe = phy_probe;
new_driver->mdiodrv.driver.remove = phy_remove;
+ new_driver->mdiodrv.driver.shutdown = phy_shutdown;
new_driver->mdiodrv.driver.owner = owner;
new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
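With genphy_handle_interrupt_no_ack() exported, a PHY whose interrupt is acked by some other entity (for instance an irqchip embedded in the PHY, as with the RTL8366RB entry later in this series) can be wired up without any driver-specific handler. A sketch of such a driver entry, with the ID and name as placeholders:

static struct phy_driver example_driver[] = { {
	PHY_ID_MATCH_EXACT(0x00112233),	/* placeholder ID */
	.name		= "Example irqchip-acked PHY",
	/* IRQ enable/ack is owned by the embedded irqchip */
	.config_intr	= genphy_no_config_intr,
	.handle_interrupt = genphy_handle_interrupt_no_ack,
} };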
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 59a94e07e7c5..f550576eb9da 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -66,11 +66,11 @@ static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
static int phy_led_trigger_register(struct phy_device *phy,
struct phy_led_trigger *plt,
- unsigned int speed)
+ unsigned int speed,
+ const char *suffix)
{
plt->speed = speed;
- phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
- phy_speed_to_str(speed));
+ phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name), suffix);
plt->trigger.name = plt->name;
return led_trigger_register(&plt->trigger);
@@ -99,12 +99,7 @@ int phy_led_triggers_register(struct phy_device *phy)
goto out_clear;
}
- phy_led_trigger_format_name(phy, phy->led_link_trigger->name,
- sizeof(phy->led_link_trigger->name),
- "link");
- phy->led_link_trigger->trigger.name = phy->led_link_trigger->name;
-
- err = led_trigger_register(&phy->led_link_trigger->trigger);
+ err = phy_led_trigger_register(phy, phy->led_link_trigger, 0, "link");
if (err)
goto out_free_link;
@@ -119,7 +114,8 @@ int phy_led_triggers_register(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++) {
err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
- speeds[i]);
+ speeds[i],
+ phy_speed_to_str(speeds[i]));
if (err)
goto out_unreg;
}
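After this refactor the link trigger and the per-speed triggers go through the same helper; the suffix is supplied by the caller, and the speed value passed for the link trigger (0) is never matched against a real link speed. The two call shapes, as used above:

	/* link trigger: speed is a don't-care, suffix is "link" */
	err = phy_led_trigger_register(phy, phy->led_link_trigger, 0,
				       "link");

	/* per-speed triggers: suffix derived from the speed, e.g. "100Mbps" */
	err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
				       speeds[i],
				       phy_speed_to_str(speeds[i]));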
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index fe2296fdda19..84f6e197f965 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1649,7 +1649,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam);
/**
- * phylink_ethtool_get_eee_err() - read the energy efficient ethernet error
+ * phylink_get_eee_err() - read the energy efficient ethernet error
* counter
* @pl: a pointer to a &struct phylink returned from phylink_create().
*
@@ -2515,9 +2515,10 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
changed = ret > 0;
+ /* Ensure the ISOLATE bit is cleared */
bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0;
ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR,
- BMCR_ANENABLE, bmcr);
+ BMCR_ANENABLE | BMCR_ISOLATE, bmcr);
if (ret < 0)
return ret;
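mdiobus_modify() clears the bits given in the mask argument and then sets the bits given in the value argument, so adding BMCR_ISOLATE to the mask while never setting it in bmcr guarantees the isolate bit ends up cleared. The open-coded equivalent, as a sketch:

	int ret = mdiobus_read(pcs->bus, pcs->addr, MII_BMCR);

	if (ret < 0)
		return ret;

	/* drop ANENABLE and ISOLATE, then re-add ANENABLE if in-band */
	ret &= ~(BMCR_ANENABLE | BMCR_ISOLATE);
	if (mode == MLO_AN_INBAND)
		ret |= BMCR_ANENABLE;

	ret = mdiobus_write(pcs->bus, pcs->addr, MII_BMCR, ret);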
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 575580d3ffe0..f71eda945c6a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -41,6 +41,12 @@
#define RTL8211E_RX_DELAY BIT(11)
#define RTL8201F_ISR 0x1e
+#define RTL8201F_ISR_ANERR BIT(15)
+#define RTL8201F_ISR_DUPLEX BIT(13)
+#define RTL8201F_ISR_LINK BIT(11)
+#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
+ RTL8201F_ISR_DUPLEX | \
+ RTL8201F_ISR_LINK)
#define RTL8201F_IER 0x13
#define RTL8366RB_POWER_SAVE 0x15
@@ -102,24 +108,45 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
static int rtl8201_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl8201_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
val = BIT(13) | BIT(12) | BIT(11);
- else
+ err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ } else {
val = 0;
+ err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ if (err)
+ return err;
- return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ err = rtl8201_ack_interrupt(phydev);
+ }
+
+ return err;
}
static int rtl8211b_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl821x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, RTL821x_INER,
RTL8211B_INER_INIT);
- else
+ } else {
err = phy_write(phydev, RTL821x_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl821x_ack_interrupt(phydev);
+ }
return err;
}
@@ -128,11 +155,20 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl821x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, RTL821x_INER,
RTL8211E_INER_LINK_STATUS);
- else
+ } else {
err = phy_write(phydev, RTL821x_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl821x_ack_interrupt(phydev);
+ }
return err;
}
@@ -140,13 +176,85 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl8211f_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
val = RTL8211F_INER_LINK_STATUS;
- else
+ err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ } else {
val = 0;
+ err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ if (err)
+ return err;
+
+ err = rtl8211f_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t rtl8201_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, RTL8201F_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8201F_ISR_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtl821x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_enabled;
+
+ irq_status = phy_read(phydev, RTL821x_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_enabled = phy_read(phydev, RTL821x_INER);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8211F_INER_LINK_STATUS))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int rtl8211_config_aneg(struct phy_device *phydev)
@@ -556,8 +664,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
+ .handle_interrupt = rtl8201_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -582,8 +690,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
.suspend = rtl8211b_suspend,
@@ -601,8 +709,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -611,8 +719,8 @@ static struct phy_driver realtek_drvs[] = {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
.config_init = &rtl8211e_config_init,
- .ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -621,8 +729,8 @@ static struct phy_driver realtek_drvs[] = {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
.config_init = &rtl8211f_config_init,
- .ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
+ .handle_interrupt = rtl8211f_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -662,6 +770,46 @@ static struct phy_driver realtek_drvs[] = {
.read_mmd = rtl822x_read_mmd,
.write_mmd = rtl822x_write_mmd,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc838),
+ .name = "RTL8226-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc848),
+ .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc849),
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc84a),
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
@@ -670,8 +818,8 @@ static struct phy_driver realtek_drvs[] = {
* irq is requested and ACKed by reading the status register,
* which is done by the irqchip code.
*/
- .ack_interrupt = genphy_no_ack_interrupt,
.config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
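rtl8211f_handle_interrupt() above uses phy_read_paged(), which selects the page, performs the read and restores the previous page with the MDIO bus lock held throughout, relying on the driver's .read_page/.write_page callbacks (rtl821x_read_page/rtl821x_write_page here). Roughly what the helper does:

static int my_read_paged(struct phy_device *phydev, int page, u32 regnum)
{
	int ret = 0, oldpage;

	oldpage = phy_select_page(phydev, page);	/* takes the bus lock */
	if (oldpage >= 0)
		ret = __phy_read(phydev, regnum);

	/* restores the old page and drops the lock, preserving 'ret'
	 * unless the restore itself fails
	 */
	return phy_restore_page(phydev, oldpage, ret);
}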
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 0fc39ac5ca88..33372756a451 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -48,6 +48,13 @@ struct smsc_phy_priv {
struct clk *refclk;
};
+static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read(phydev, MII_LAN83C185_ISF);
+
+ return rc < 0 ? rc : 0;
+}
+
static int smsc_phy_config_intr(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
@@ -55,21 +62,47 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = smsc_phy_ack_interrupt(phydev);
+ if (rc)
+ return rc;
+
intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
if (priv->energy_enable)
intmask |= MII_LAN83C185_ISF_INT7;
- }
+ rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ } else {
+ rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ if (rc)
+ return rc;
- rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ rc = smsc_phy_ack_interrupt(phydev);
+ }
return rc < 0 ? rc : 0;
}
-static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
{
- int rc = phy_read (phydev, MII_LAN83C185_ISF);
+ int irq_status, irq_enabled;
- return rc < 0 ? rc : 0;
+ irq_enabled = phy_read(phydev, MII_LAN83C185_IM);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_status = phy_read(phydev, MII_LAN83C185_ISF);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int smsc_phy_config_init(struct phy_device *phydev)
@@ -314,8 +347,8 @@ static struct phy_driver smsc_phy_driver[] = {
.soft_reset = smsc_phy_reset,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -333,8 +366,8 @@ static struct phy_driver smsc_phy_driver[] = {
.soft_reset = smsc_phy_reset,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
@@ -362,8 +395,8 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = lan87xx_config_aneg,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
@@ -385,8 +418,8 @@ static struct phy_driver smsc_phy_driver[] = {
.config_init = lan911x_config_init,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -410,8 +443,8 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = lan87xx_config_aneg_ext,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
@@ -436,8 +469,8 @@ static struct phy_driver smsc_phy_driver[] = {
.soft_reset = smsc_phy_reset,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index d735a01380ed..431fe5e0ce31 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -48,32 +48,55 @@ static int ste10Xp_config_init(struct phy_device *phydev)
return 0;
}
+static int ste10Xp_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_XCIIS);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int ste10Xp_config_intr(struct phy_device *phydev)
{
- int err, value;
+ int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* clear any pending interrupts */
+ err = ste10Xp_ack_interrupt(phydev);
+ if (err)
+ return err;
+
/* Enable all STe101P interrupts (PR12) */
err = phy_write(phydev, MII_XIE, MII_XIE_DEFAULT_MASK);
- /* clear any pending interrupts */
- if (err == 0) {
- value = phy_read(phydev, MII_XCIIS);
- if (value < 0)
- err = value;
- }
- } else
+ } else {
err = phy_write(phydev, MII_XIE, 0);
+ if (err)
+ return err;
+
+ err = ste10Xp_ack_interrupt(phydev);
+ }
return err;
}
-static int ste10Xp_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t ste10Xp_handle_interrupt(struct phy_device *phydev)
{
- int err = phy_read(phydev, MII_XCIIS);
- if (err < 0)
- return err;
+ int irq_status;
- return 0;
+ irq_status = phy_read(phydev, MII_XCIIS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_XIE_DEFAULT_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static struct phy_driver ste10xp_pdriver[] = {
@@ -83,8 +106,8 @@ static struct phy_driver ste10xp_pdriver[] = {
.name = "STe101p",
/* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
- .ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
+ .handle_interrupt = ste10Xp_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -93,8 +116,8 @@ static struct phy_driver ste10xp_pdriver[] = {
.name = "STe100p",
/* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
- .ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
+ .handle_interrupt = ste10Xp_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index bb680352708a..16704e243162 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -40,6 +40,11 @@
#define MII_VSC8244_ISTAT_SPEED 0x4000
#define MII_VSC8244_ISTAT_LINK 0x2000
#define MII_VSC8244_ISTAT_DUPLEX 0x1000
+#define MII_VSC8244_ISTAT_MASK (MII_VSC8244_ISTAT_SPEED | \
+ MII_VSC8244_ISTAT_LINK | \
+ MII_VSC8244_ISTAT_DUPLEX)
+
+#define MII_VSC8221_ISTAT_MASK MII_VSC8244_ISTAT_LINK
/* Vitesse Auxiliary Control/Status Register */
#define MII_VSC8244_AUX_CONSTAT 0x1c
@@ -270,25 +275,14 @@ static int vsc8601_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc824x_ack_interrupt(struct phy_device *phydev)
-{
- int err = 0;
-
- /* Don't bother to ACK the interrupts if interrupts
- * are disabled. The 824x cannot clear the interrupts
- * if they are disabled.
- */
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_read(phydev, MII_VSC8244_ISTAT);
-
- return (err < 0) ? err : 0;
-}
-
static int vsc82xx_config_intr(struct phy_device *phydev)
{
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ /* Don't bother to ACK the interrupts since the 824x cannot
+ * clear the interrupts if they are disabled.
+ */
err = phy_write(phydev, MII_VSC8244_IMASK,
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
@@ -311,6 +305,31 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
return err;
}
+static irqreturn_t vsc82xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_mask;
+
+ if (phydev->drv->phy_id == PHY_ID_VSC8244 ||
+ phydev->drv->phy_id == PHY_ID_VSC8572 ||
+ phydev->drv->phy_id == PHY_ID_VSC8601)
+ irq_mask = MII_VSC8244_ISTAT_MASK;
+ else
+ irq_mask = MII_VSC8221_ISTAT_MASK;
+
+ irq_status = phy_read(phydev, MII_VSC8244_ISTAT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int vsc8221_config_init(struct phy_device *phydev)
{
int err;
@@ -390,8 +409,8 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC8244,
.name = "Vitesse VSC8244",
@@ -399,8 +418,8 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC8572,
.name = "Vitesse VSC8572",
@@ -408,16 +427,16 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC8601,
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = &vsc8601_config_init,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC7385,
.name = "Vitesse VSC7385",
@@ -461,8 +480,8 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
/* Vitesse 8221 */
.phy_id = PHY_ID_VSC8221,
@@ -470,8 +489,8 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8221",
/* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
/* Vitesse 8211 */
.phy_id = PHY_ID_VSC8211,
@@ -479,8 +498,8 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8211",
/* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
} };
module_phy_driver(vsc82xx_driver);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 07f1f3933927..b4092127a92c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -975,11 +975,11 @@ static void team_port_disable(struct team *team,
}
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
static void __team_compute_features(struct team *team)
{
@@ -1009,8 +1009,7 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_GSO_UDP_L4;
+ NETIF_F_HW_VLAN_STAG_TX;
team->dev->hard_header_len = max_hard_header_len;
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -2175,7 +2174,7 @@ static void team_setup(struct net_device *dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
dev->features |= dev->hw_features;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
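The explicit NETIF_F_GSO_UDP_L4 bits can be dropped here because NETIF_F_GSO_SOFTWARE already covers them; around this kernel era the definition in include/linux/netdev_features.h reads approximately:

#define NETIF_F_GSO_SOFTWARE	(NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
				 NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)

so replacing NETIF_F_ALL_TSO with NETIF_F_GSO_SOFTWARE picks up the UDP and SCTP segmentation bits and makes the explicit NETIF_F_GSO_UDP_L4 redundant.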
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index be69d272052f..3d45d56172cb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -107,17 +107,6 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
-struct tun_pcpu_stats {
- u64_stats_t rx_packets;
- u64_stats_t rx_bytes;
- u64_stats_t tx_packets;
- u64_stats_t tx_bytes;
- struct u64_stats_sync syncp;
- u32 rx_dropped;
- u32 tx_dropped;
- u32 rx_frame_errors;
-};
-
/* A tun_file connects an open character device to a tuntap netdevice. It
* also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -207,7 +196,7 @@ struct tun_struct {
void *security;
u32 flow_count;
u32 rx_batched;
- struct tun_pcpu_stats __percpu *pcpu_stats;
+ atomic_long_t rx_frame_errors;
struct bpf_prog __rcu *xdp_prog;
struct tun_prog __rcu *steering_prog;
struct tun_prog __rcu *filter_prog;
@@ -1066,7 +1055,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ atomic_long_inc(&dev->tx_dropped);
skb_tx_error(skb);
kfree_skb(skb);
rcu_read_unlock();
@@ -1103,37 +1092,12 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
struct tun_struct *tun = netdev_priv(dev);
- struct tun_pcpu_stats *p;
- int i;
-
- for_each_possible_cpu(i) {
- u64 rxpackets, rxbytes, txpackets, txbytes;
- unsigned int start;
- p = per_cpu_ptr(tun->pcpu_stats, i);
- do {
- start = u64_stats_fetch_begin(&p->syncp);
- rxpackets = u64_stats_read(&p->rx_packets);
- rxbytes = u64_stats_read(&p->rx_bytes);
- txpackets = u64_stats_read(&p->tx_packets);
- txbytes = u64_stats_read(&p->tx_bytes);
- } while (u64_stats_fetch_retry(&p->syncp, start));
+ dev_get_tstats64(dev, stats);
- stats->rx_packets += rxpackets;
- stats->rx_bytes += rxbytes;
- stats->tx_packets += txpackets;
- stats->tx_bytes += txbytes;
-
- /* u32 counters */
- rx_dropped += p->rx_dropped;
- rx_frame_errors += p->rx_frame_errors;
- tx_dropped += p->tx_dropped;
- }
- stats->rx_dropped = rx_dropped;
- stats->rx_frame_errors = rx_frame_errors;
- stats->tx_dropped = tx_dropped;
+ stats->rx_frame_errors +=
+ (unsigned long)atomic_long_read(&tun->rx_frame_errors);
}
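tun now leans on the core's per-cpu tstats helpers instead of its private per-cpu counters; only rx_frame_errors, which has no tstats slot, survives as a private atomic. A minimal sketch of the pattern, assuming dev->tstats has been allocated as struct pcpu_sw_netstats (typically in the device setup path):

struct my_priv {
	atomic_long_t rx_frame_errors;	/* no tstats slot for this one */
};

static void my_rx_account(struct net_device *dev, unsigned int len)
{
	/* the helper updates this_cpu counters, so callers in process
	 * context must keep themselves from migrating mid-update
	 */
	preempt_disable();
	dev_sw_netstats_rx_add(dev, len);
	preempt_enable();
}

static void my_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct my_priv *priv = netdev_priv(dev);

	dev_get_tstats64(dev, stats);	/* folds in the per-cpu tstats */
	stats->rx_frame_errors +=
		(unsigned long)atomic_long_read(&priv->rx_frame_errors);
}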
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
@@ -1247,7 +1211,7 @@ resample:
void *frame = tun_xdp_to_ptr(xdp);
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
- this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ atomic_long_inc(&dev->tx_dropped);
xdp_return_frame_rx_napi(xdp);
drops++;
}
@@ -1283,7 +1247,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
- .ndo_get_stats64 = tun_net_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
.ndo_change_carrier = tun_net_change_carrier,
@@ -1577,7 +1541,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
trace_xdp_exception(tun->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
break;
}
@@ -1683,7 +1647,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
size_t total_len = iov_iter_count(from);
size_t len = total_len, align = tun->align, linear;
struct virtio_net_hdr gso = { 0 };
- struct tun_pcpu_stats *stats;
int good_linear;
int copylen;
bool zerocopy = false;
@@ -1752,7 +1715,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
if (IS_ERR(skb)) {
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
return PTR_ERR(skb);
}
if (!skb)
@@ -1781,7 +1744,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
if (frags)
mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
@@ -1795,7 +1758,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
err = -EFAULT;
drop:
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
kfree_skb(skb);
if (frags) {
tfile->napi.skb = NULL;
@@ -1807,7 +1770,7 @@ drop:
}
if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
if (frags) {
tfile->napi.skb = NULL;
@@ -1830,7 +1793,7 @@ drop:
pi.proto = htons(ETH_P_IPV6);
break;
default:
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
kfree_skb(skb);
return -EINVAL;
}
@@ -1910,7 +1873,7 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
@@ -1942,12 +1905,9 @@ drop:
}
rcu_read_unlock();
- stats = get_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_add(&stats->rx_bytes, len);
- u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(stats);
+ preempt_disable();
+ dev_sw_netstats_rx_add(tun->dev, len);
+ preempt_enable();
if (rxhash)
tun_flow_update(tun, rxhash, tfile);
@@ -1979,7 +1939,6 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
{
int vnet_hdr_sz = 0;
size_t size = xdp_frame->len;
- struct tun_pcpu_stats *stats;
size_t ret;
if (tun->flags & IFF_VNET_HDR) {
@@ -1996,12 +1955,9 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
- stats = get_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_packets);
- u64_stats_add(&stats->tx_bytes, ret);
- u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(tun->pcpu_stats);
+ preempt_disable();
+ dev_sw_netstats_tx_add(tun->dev, 1, ret);
+ preempt_enable();
return ret;
}
@@ -2013,7 +1969,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct iov_iter *iter)
{
struct tun_pi pi = { 0, skb->protocol };
- struct tun_pcpu_stats *stats;
ssize_t total;
int vlan_offset = 0;
int vlan_hlen = 0;
@@ -2091,12 +2046,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
done:
/* caller is in process context, */
- stats = get_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_packets);
- u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
- u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(tun->pcpu_stats);
+ preempt_disable();
+ dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
+ preempt_enable();
return total;
}
@@ -2235,11 +2187,11 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
- free_percpu(tun->pcpu_stats);
- /* We clear pcpu_stats so that tun_set_iff() can tell if
+ free_percpu(dev->tstats);
+ /* We clear tstats so that tun_set_iff() can tell if
* tun_free_netdev() has been called from register_netdevice().
*/
- tun->pcpu_stats = NULL;
+ dev->tstats = NULL;
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
@@ -2370,7 +2322,6 @@ static int tun_xdp_one(struct tun_struct *tun,
unsigned int datasize = xdp->data_end - xdp->data;
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
struct virtio_net_hdr *gso = &hdr->gso;
- struct tun_pcpu_stats *stats;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
u32 rxhash = 0, act;
@@ -2428,7 +2379,7 @@ build:
skb_put(skb, xdp->data_end - xdp->data);
if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
err = -EINVAL;
goto out;
@@ -2451,14 +2402,10 @@ build:
netif_receive_skb(skb);
- /* No need for get_cpu_ptr() here since this function is
+ /* No need to disable preemption here since this function is
* always called with bh disabled
*/
- stats = this_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_add(&stats->rx_bytes, datasize);
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(tun->dev, datasize);
if (rxhash)
tun_flow_update(tun, rxhash, tfile);
@@ -2751,8 +2698,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->rx_batched = 0;
RCU_INIT_POINTER(tun->steering_prog, NULL);
- tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
- if (!tun->pcpu_stats) {
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats) {
err = -ENOMEM;
goto err_free_dev;
}
@@ -2807,16 +2754,16 @@ err_detach:
tun_detach_all(dev);
/* We are here because register_netdevice() has failed.
* If register_netdevice() already called tun_free_netdev()
- * while dealing with the error, tun->pcpu_stats has been cleared.
+ * while dealing with the error, dev->tstats has been cleared.
*/
- if (!tun->pcpu_stats)
+ if (!dev->tstats)
goto err_free_dev;
err_free_flow:
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
err_free_stat:
- free_percpu(tun->pcpu_stats);
+ free_percpu(dev->tstats);
err_free_dev:
free_netdev(dev);
return err;
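
The tun conversion above is the template for the rest of this series: drop the driver-private per-CPU counters, let the core own a struct pcpu_sw_netstats via dev->tstats, and update it through helpers. A minimal sketch of the pattern (hypothetical driver "foo"; the tstats field and the helpers are the real net core API):

	static int foo_ndo_init(struct net_device *dev)
	{
		/* one struct pcpu_sw_netstats per CPU, syncp pre-initialized */
		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		return dev->tstats ? 0 : -ENOMEM;
	}

	static void foo_ndo_uninit(struct net_device *dev)
	{
		free_percpu(dev->tstats);
	}

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		unsigned int len = skb->len;

		/* lockless per-CPU add; the caller must not migrate CPUs,
		 * which is why the tun hunks wrap the process-context
		 * callers in preempt_disable()/preempt_enable()
		 */
		dev_sw_netstats_tx_add(dev, 1, len);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_init	 = foo_ndo_init,
		.ndo_uninit	 = foo_ndo_uninit,
		.ndo_start_xmit	 = foo_start_xmit,
		/* folds dev->stats and the per-CPU tstats into one report */
		.ndo_get_stats64 = dev_get_tstats64,
	};

Counters without fast-path contention, such as tun's rx_frame_errors above, stay outside tstats as plain atomic_long_t fields and are added on top in the ndo_get_stats64 path.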
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index b46993d5f997..1e3719028780 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -628,4 +628,13 @@ config USB_NET_AQC111
This driver should work with at least the following devices:
* Aquantia AQtion USB to 5GbE
+config USB_RTL8153_ECM
+ tristate "RTL8153 ECM support"
+ depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
+ default y
+ help
+ This option supports ECM mode for the RTL8153 ethernet adapter,
+ when CONFIG_USB_RTL8152 is not set or when the RTL8153 device is
+ not supported by the r8152 driver.
+
endif # USB_NET_DRIVERS
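
The "(USB_RTL8152 || USB_RTL8152=n)" clause is the usual Kconfig idiom for an optional dependency on another driver: it only rules out USB_RTL8153_ECM=y while USB_RTL8152=m, so this fallback can never be built in below a modular r8152. Its runtime counterpart is the IS_REACHABLE() test in rtl8153_ecm_probe() further down; as an illustration of the macro's semantics (the helper name here is hypothetical):

	/* IS_REACHABLE(CONFIG_USB_RTL8152) is true when r8152 is built-in,
	 * or when r8152 and the calling code are both modular -- exactly
	 * the combinations the dependency above still permits.
	 */
	static bool r8152_can_take_over(struct usb_interface *intf)
	{
	#if IS_REACHABLE(CONFIG_USB_RTL8152)
		return rtl8152_get_version(intf) != 0;
	#else
		return false;
	#endif
	}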
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 99fd12be2111..4964f7b326fb 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
obj-$(CONFIG_USB_NET_AQC111) += aqc111.o
+obj-$(CONFIG_USB_RTL8153_ECM) += r8153_ecm.o
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 0717c18015c9..73b97f4cc1ec 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -641,7 +641,7 @@ static const struct net_device_ops aqc111_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = aqc111_change_mtu,
.ndo_set_mac_address = aqc111_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index ef548beba684..6e13d8165852 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -194,7 +194,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
@@ -580,7 +580,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
@@ -1050,7 +1050,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = asix_set_multicast,
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index fd3a04d98dc1..b404c9462dce 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -120,7 +120,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = phy_do_ioctl_running,
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 5541f3faedbc..d650b39b6e5d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1031,7 +1031,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = ax88179_change_mtu,
.ndo_set_mac_address = ax88179_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index eb100eb33de3..5db66272fc82 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -98,7 +98,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e04f588538cc..2bac57d5e8d5 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -793,7 +793,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1317,7 +1317,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
break;
}
- /* calculate frame number withing this NDP */
+ /* calculate frame number within this NDP */
if (ctx->is_ndp16) {
ndplen = le16_to_cpu(ndp.ndp16->wLength);
index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 915ac75b55fc..b5d2ac55a874 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -343,7 +343,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = dm9601_ioctl,
.ndo_set_rx_mode = dm9601_set_multicast,
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index cb5bc1a7fa5a..ed05f992c612 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -133,7 +133,7 @@ static const struct net_device_ops int51x1_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = int51x1_set_multicast,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 65b315bc60ab..bf243edeb064 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -822,20 +822,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
@@ -845,18 +844,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ lan78xx_write_reg(dev, OTP_ADDR1,
((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ lan78xx_write_reg(dev, OTP_ADDR2,
((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
@@ -864,7 +863,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
} while (buf & OTP_STATUS_BUSY_);
- ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
data[i] = (u8)(buf & 0xFF);
}
@@ -876,20 +875,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
@@ -899,21 +897,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
}
/* set to BYTE program mode */
- ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ lan78xx_write_reg(dev, OTP_ADDR1,
((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ lan78xx_write_reg(dev, OTP_ADDR2,
((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
@@ -1038,7 +1036,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
int i;
- int ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
@@ -1047,14 +1044,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
for (i = 1; i < NUM_OF_MAF; i++) {
- ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
- ret = lan78xx_write_reg(dev, MAF_LO(i),
+ lan78xx_write_reg(dev, MAF_HI(i), 0);
+ lan78xx_write_reg(dev, MAF_LO(i),
pdata->pfilter_table[i][1]);
- ret = lan78xx_write_reg(dev, MAF_HI(i),
+ lan78xx_write_reg(dev, MAF_HI(i),
pdata->pfilter_table[i][0]);
}
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1124,7 +1121,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
u16 lcladv, u16 rmtadv)
{
u32 flow = 0, fct_flow = 0;
- int ret;
u8 cap;
if (dev->fc_autoneg)
@@ -1147,10 +1143,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
- ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
/* threshold value should be set before enabling flow */
- ret = lan78xx_write_reg(dev, FLOW, flow);
+ lan78xx_write_reg(dev, FLOW, flow);
return 0;
}
@@ -1663,11 +1659,10 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
- int ret;
u8 addr[6];
- ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1700,12 +1695,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
}
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
ether_addr_copy(dev->net->dev_addr, addr);
}
@@ -1838,7 +1833,7 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
struct phy_device *phydev = net->phydev;
- int ret, temp;
+ int temp;
/* At forced 100 F/H mode, chip may fail to set mode correctly
* when cable is switched between long(~50+m) and short one.
@@ -1849,7 +1844,7 @@ static void lan78xx_link_status_change(struct net_device *net)
/* disable phy interrupt */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
temp = phy_read(phydev, MII_BMCR);
temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
@@ -1863,7 +1858,7 @@ static void lan78xx_link_status_change(struct net_device *net)
/* enable phy interrupt back */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
}
}
@@ -1917,14 +1912,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
- int ret;
/* call register access here because irq_bus_lock & irq_bus_sync_unlock
* are the only two callbacks executed in a non-atomic context.
*/
- ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ lan78xx_read_reg(dev, INT_EP_CTL, &buf);
if (buf != data->irqenable)
- ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
mutex_unlock(&data->irq_lock);
}
@@ -1991,7 +1985,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
static int lan8835_fixup(struct phy_device *phydev)
{
int buf;
- int ret;
struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
@@ -2001,11 +1994,11 @@ static int lan8835_fixup(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
/* RGMII MAC TXC Delay Enable */
- ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
+ lan78xx_write_reg(dev, MAC_RGMII_ID,
MAC_RGMII_ID_TXC_DELAY_EN_);
/* RGMII TX DLL Tune Adjust */
- ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
@@ -2189,28 +2182,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- int ret = 0;
u32 buf;
bool rxenabled;
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
return 0;
@@ -2267,13 +2259,12 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
int ll_mtu = new_mtu + netdev->hard_header_len;
int old_hard_mtu = dev->hard_mtu;
int old_rx_urb_size = dev->rx_urb_size;
- int ret;
/* no second zero-length packet read wanted after mtu-sized packets */
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
+ lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
netdev->mtu = new_mtu;
@@ -2296,7 +2287,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
- int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2313,12 +2303,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Added to support MAC address changes */
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
return 0;
}
@@ -2330,7 +2320,6 @@ static int lan78xx_set_features(struct net_device *netdev,
struct lan78xx_net *dev = netdev_priv(netdev);
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
unsigned long flags;
- int ret;
spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
@@ -2354,7 +2343,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
return 0;
}
@@ -3804,7 +3793,6 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
u32 buf;
- int ret;
int mask_index;
u16 crc;
u32 temp_wucsr;
@@ -3813,26 +3801,26 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ lan78xx_read_reg(dev, MAC_TX, &buf);
buf &= ~MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_write_reg(dev, MAC_TX, buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
- ret = lan78xx_write_reg(dev, WUCSR, 0);
- ret = lan78xx_write_reg(dev, WUCSR2, 0);
- ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ lan78xx_write_reg(dev, WUCSR, 0);
+ lan78xx_write_reg(dev, WUCSR2, 0);
+ lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
temp_wucsr = 0;
temp_pmt_ctl = 0;
- ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+ lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
mask_index = 0;
if (wol & WAKE_PHY) {
@@ -3861,30 +3849,30 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
/* for IPv6 Multicast */
crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3905,16 +3893,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
* for packettype (offset 12,13) = ARP (0x0806)
*/
crc = lan78xx_wakeframe_crc16(arp_type, 2);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_ALL_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3922,7 +3910,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
- ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+ lan78xx_write_reg(dev, WUCSR, temp_wucsr);
/* when multiple WOL bits are set */
if (hweight_long((unsigned long)wol) > 1) {
@@ -3930,16 +3918,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
- ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+ lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear WUPS */
- ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ lan78xx_read_reg(dev, PMT_CTL, &buf);
buf |= PMT_CTL_WUPS_MASK_;
- ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ lan78xx_write_reg(dev, PMT_CTL, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
return 0;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 09bfa6a4dfbc..fc512b780d15 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -462,7 +462,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_rx_mode = mcs7830_set_multicast,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index fc378ff56775..d166c321ee9b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -72,7 +72,6 @@ struct qmimux_hdr {
struct qmimux_priv {
struct net_device *real_dev;
u8 mux_id;
- struct pcpu_sw_netstats __percpu *stats64;
};
static int qmimux_open(struct net_device *dev)
@@ -108,34 +107,19 @@ static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev
skb->dev = priv->real_dev;
ret = dev_queue_xmit(skb);
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_sw_netstats *stats64 = this_cpu_ptr(priv->stats64);
-
- u64_stats_update_begin(&stats64->syncp);
- stats64->tx_packets++;
- stats64->tx_bytes += len;
- u64_stats_update_end(&stats64->syncp);
- } else {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_sw_netstats_tx_add(dev, 1, len);
+ else
dev->stats.tx_dropped++;
- }
return ret;
}
-static void qmimux_get_stats64(struct net_device *net,
- struct rtnl_link_stats64 *stats)
-{
- struct qmimux_priv *priv = netdev_priv(net);
-
- netdev_stats_to_stats64(stats, &net->stats);
- dev_fetch_sw_netstats(stats, priv->stats64);
-}
-
static const struct net_device_ops qmimux_netdev_ops = {
.ndo_open = qmimux_open,
.ndo_stop = qmimux_stop,
.ndo_start_xmit = qmimux_start_xmit,
- .ndo_get_stats64 = qmimux_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static void qmimux_setup(struct net_device *dev)
@@ -224,14 +208,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
net->stats.rx_errors++;
return 0;
} else {
- struct pcpu_sw_netstats *stats64;
- struct qmimux_priv *priv = netdev_priv(net);
-
- stats64 = this_cpu_ptr(priv->stats64);
- u64_stats_update_begin(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += pkt_len;
- u64_stats_update_end(&stats64->syncp);
+ dev_sw_netstats_rx_add(net, pkt_len);
}
skip:
@@ -256,8 +233,8 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
priv->mux_id = mux_id;
priv->real_dev = real_dev;
- priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!priv->stats64) {
+ new_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!new_dev->tstats) {
err = -ENOBUFS;
goto out_free_newdev;
}
@@ -292,7 +269,7 @@ static void qmimux_unregister_device(struct net_device *dev,
struct qmimux_priv *priv = netdev_priv(dev);
struct net_device *real_dev = priv->real_dev;
- free_percpu(priv->stats64);
+ free_percpu(dev->tstats);
netdev_upper_dev_unlink(real_dev, dev);
unregister_netdevice_queue(dev, head);
@@ -598,7 +575,7 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qmi_wwan_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b1770489aca5..c448d6089821 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/firmware.h>
#include <crypto/hash.h>
+#include <linux/usb/r8152.h>
/* Information for net-next */
#define NETNEXT_VERSION "11"
@@ -653,18 +654,6 @@ enum rtl_register_content {
#define INTR_LINK 0x0004
-#define RTL8152_REQT_READ 0xc0
-#define RTL8152_REQT_WRITE 0x40
-#define RTL8152_REQ_GET_REGS 0x05
-#define RTL8152_REQ_SET_REGS 0x05
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-#define BYTE_EN_SIX_BYTES 0x3f
-#define BYTE_EN_START_MASK 0x0f
-#define BYTE_EN_END_MASK 0xf0
-
#define RTL8153_MAX_PACKET 9216 /* 9K */
#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \
ETH_FCS_LEN)
@@ -689,21 +678,9 @@ enum rtl8152_flags {
LENOVO_MACPASSTHRU,
};
-/* Define these values to match your device */
-#define VENDOR_ID_REALTEK 0x0bda
-#define VENDOR_ID_MICROSOFT 0x045e
-#define VENDOR_ID_SAMSUNG 0x04e8
-#define VENDOR_ID_LENOVO 0x17ef
-#define VENDOR_ID_LINKSYS 0x13b1
-#define VENDOR_ID_NVIDIA 0x0955
-#define VENDOR_ID_TPLINK 0x2357
-
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
-#define MCU_TYPE_PLA 0x0100
-#define MCU_TYPE_USB 0x0000
-
struct tally_counter {
__le64 tx_packets;
__le64 rx_packets;
@@ -898,6 +875,7 @@ struct fw_header {
* struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
* The layout of the firmware block is:
* <struct fw_mac> + <info> + <firmware data>.
+ * @blk_hdr: firmware descriptor (type, length)
* @fw_offset: offset of the firmware binary data. The start address of
* the data would be the address of struct fw_mac + @fw_offset.
* @fw_reg: the register to load the firmware. Depends on chip.
@@ -911,6 +889,7 @@ struct fw_header {
* @bp_num: the break point number which needs to be set for this firmware.
* Depends on the firmware.
* @bp: break points. Depends on firmware.
+ * @reserved: reserved space (unused)
* @fw_ver_reg: the register to store the fw version.
* @fw_ver_data: the firmware version of the current type.
* @info: additional information for debugging, and is followed by the
@@ -936,8 +915,10 @@ struct fw_mac {
/**
* struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START.
* This is used to set patch key when loading the firmware of PHY.
+ * @blk_hdr: firmware descriptor (type, length)
* @key_reg: the register to write the patch key.
* @key_data: patch key.
+ * @reserved: reserved space (unused)
*/
struct fw_phy_patch_key {
struct fw_block blk_hdr;
@@ -950,6 +931,7 @@ struct fw_phy_patch_key {
* struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC.
* The layout of the firmware block is:
* <struct fw_phy_nc> + <info> + <firmware data>.
+ * @blk_hdr: firmware descriptor (type, length)
* @fw_offset: offset of the firmware binary data. The start address of
* the data would be the address of struct fw_phy_nc + @fw_offset.
* @fw_reg: the register to load the firmware. Depends on chip.
@@ -958,8 +940,9 @@ struct fw_phy_patch_key {
* @patch_en_addr: the register of enabling patch mode. Depends on chip.
* @patch_en_value: patch mode enabled mask. Depends on the firmware.
* @mode_reg: the regitster of switching the mode.
- * @mod_pre: the mode needing to be set before loading the firmware.
- * @mod_post: the mode to be set when finishing to load the firmware.
+ * @mode_pre: the mode needing to be set before loading the firmware.
+ * @mode_post: the mode to be set when finishing to load the firmware.
+ * @reserved: reserved space (unused)
* @bp_start: the start register of break points. Depends on chip.
* @bp_num: the break point number which needs to be set for this firmware.
* Depends on the firmware.
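
The kernel-doc added above fixes the block layout contract: each block is <header> + <info> + <firmware data>, with the payload @fw_offset bytes past the block start. A sketch in pointer terms, assuming only the documented fields:

	/* Locating the payload of a fw_mac block, given a pointer "mac"
	 * into the firmware image (fw_offset is a __le16 field):
	 *
	 *	data = (const u8 *)mac + le16_to_cpu(mac->fw_offset);
	 *
	 * struct fw_phy_nc uses the same scheme via its own @fw_offset.
	 */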
@@ -6615,7 +6598,7 @@ static int rtl_fw_init(struct r8152 *tp)
return 0;
}
-static u8 rtl_get_version(struct usb_interface *intf)
+u8 rtl8152_get_version(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
u32 ocp_data = 0;
@@ -6673,12 +6656,13 @@ static u8 rtl_get_version(struct usb_interface *intf)
return version;
}
+EXPORT_SYMBOL_GPL(rtl8152_get_version);
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- u8 version = rtl_get_version(intf);
+ u8 version = rtl8152_get_version(intf);
struct r8152 *tp;
struct net_device *netdev;
int ret;
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
new file mode 100644
index 000000000000..2c3fabd38b16
--- /dev/null
+++ b/drivers/net/usb/r8153_ecm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/r8152.h>
+
+#define OCP_BASE 0xe86c
+
+static int pla_read_word(struct usbnet *dev, u16 index)
+{
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ __le32 tmp;
+ int ret;
+
+ if (shift)
+ byen <<= shift;
+
+ index &= ~3;
+
+ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+ if (ret < 0)
+ goto out;
+
+ ret = __le32_to_cpu(tmp);
+ ret >>= (shift * 8);
+ ret &= 0xffff;
+
+out:
+ return ret;
+}
+
+static int pla_write_word(struct usbnet *dev, u16 index, u32 data)
+{
+ u32 mask = 0xffff;
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ __le32 tmp;
+ int ret;
+
+ data &= mask;
+
+ if (shift) {
+ byen <<= shift;
+ mask <<= (shift * 8);
+ data <<= (shift * 8);
+ }
+
+ index &= ~3;
+
+ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+
+ if (ret < 0)
+ goto out;
+
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
+
+ ret = usbnet_write_cmd(dev, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+
+out:
+ return ret;
+}
+
+static int r8153_ecm_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = pla_write_word(dev, OCP_BASE, 0xa000);
+ if (ret < 0)
+ goto out;
+
+ ret = pla_read_word(dev, 0xb400 + reg * 2);
+
+out:
+ return ret;
+}
+
+static void r8153_ecm_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = pla_write_word(dev, OCP_BASE, 0xa000);
+ if (ret < 0)
+ return;
+
+ ret = pla_write_word(dev, 0xb400 + reg * 2, val);
+}
+
+static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+
+ status = usbnet_cdc_bind(dev, intf);
+ if (status < 0)
+ return status;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = r8153_ecm_mdio_read;
+ dev->mii.mdio_write = r8153_ecm_mdio_write;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.supports_gmii = 1;
+
+ return status;
+}
+
+static const struct driver_info r8153_info = {
+ .description = "RTL8153 ECM Device",
+ .flags = FLAG_ETHER,
+ .bind = r8153_bind,
+ .unbind = usbnet_cdc_unbind,
+ .status = usbnet_cdc_status,
+ .manage_power = usbnet_manage_power,
+};
+
+static const struct usb_device_id products[] = {
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
+ { }, /* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static int rtl8153_ecm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+ if (rtl8152_get_version(intf))
+ return -ENODEV;
+#endif
+
+ return usbnet_probe(intf, id);
+}
+
+static struct usb_driver r8153_ecm_driver = {
+ .name = "r8153_ecm",
+ .id_table = products,
+ .probe = rtl8153_ecm_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(r8153_ecm_driver);
+
+MODULE_AUTHOR("Hayes Wang");
+MODULE_DESCRIPTION("Realtek USB ECM device");
+MODULE_LICENSE("GPL");
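
pla_read_word() and pla_write_word() above emulate 16-bit accesses on top of the chip's 32-bit vendor requests: the low index bits select which half of the dword is wanted, and the BYTE_EN_* constants (now shared through linux/usb/r8152.h) tell the MCU which bytes are live. A worked example for an odd word address, using only the values visible in this file:

	/* Reading the word at index 0xb402:
	 *   shift = 0xb402 & 2        -> 2
	 *   byen  = BYTE_EN_WORD << 2 -> 0x33 << 2 = 0xcc  (upper word)
	 *   index &= ~3               -> 0xb400            (dword aligned)
	 * The whole dword is transferred, and the result is
	 * (__le32_to_cpu(tmp) >> 16) & 0xffff.  pla_write_word() fetches
	 * the same dword, splices the new 16 bits into the masked region,
	 * and writes the dword back.
	 */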
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 6fa7a009a24a..6609d21ef894 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -279,7 +279,7 @@ static const struct net_device_ops rndis_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 0abd257b634c..55a244eca5ca 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -184,7 +184,7 @@ static const struct net_device_ops sierra_net_device_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 8689835a5214..4353b370249f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1435,7 +1435,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = smsc75xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index ea0d5f04dc3a..4c8ee1cff4d4 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1041,7 +1041,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc95xx_ioctl,
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index e04c8054c2cf..878557ad03ad 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -308,7 +308,7 @@ static const struct net_device_ops sr9700_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = sr9700_ioctl,
.ndo_set_rx_mode = sr9700_set_multicast,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 681e0def6356..da56735d7755 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -681,7 +681,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = sr_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = sr_ioctl,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 6062dc27870e..1447da1d5729 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -304,7 +304,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
*/
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
- struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
unsigned long flags;
int status;
@@ -980,15 +980,6 @@ int usbnet_set_link_ksettings(struct net_device *net,
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
-void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
-{
- struct usbnet *dev = netdev_priv(net);
-
- netdev_stats_to_stats64(stats, &net->stats);
- dev_fetch_sw_netstats(stats, dev->stats64);
-}
-EXPORT_SYMBOL_GPL(usbnet_get_stats64);
-
u32 usbnet_get_link (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -1220,7 +1211,7 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
@@ -1596,7 +1587,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
- free_percpu(dev->stats64);
+ free_percpu(net->tstats);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1608,7 +1599,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1671,8 +1662,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_info = info;
dev->driver_name = name;
- dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->stats64)
+ net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!net->tstats)
goto out0;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
@@ -1812,7 +1803,7 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
- free_percpu(dev->stats64);
+ free_percpu(net->tstats);
out0:
free_netdev(net);
out:
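
Note that usbnet's hot paths keep updating the per-CPU counters by hand under the syncp sequence counter, only now on the core-owned dev->net->tstats; tx_complete() runs in URB completion context, hence the _irqsave variant. The pattern, condensed from tx_complete() (entry->length is the queued packet length):

	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
	stats64->tx_packets++;
	stats64->tx_bytes += entry->length;
	u64_stats_update_end_irqrestore(&stats64->syncp, flags);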
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8c737668008a..359d3ab33c4d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1329,7 +1329,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
}
if (ifmp && tbp[IFLA_IFNAME]) {
- nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
@@ -1379,7 +1379,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
else
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1a557aeba32b..236fcc55a5fd 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -66,6 +66,7 @@ struct vxlan_net {
struct list_head vxlan_list;
struct hlist_head sock_list[PORT_HASH_SIZE];
spinlock_t sock_lock;
+ struct notifier_block nexthop_notifier_block;
};
/* Forwarding table entry */
@@ -3210,7 +3211,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -3229,7 +3230,7 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
@@ -4683,9 +4684,14 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh)
static int vxlan_nexthop_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct nexthop *nh = ptr;
+ struct nh_notifier_info *info = ptr;
+ struct nexthop *nh;
+
+ if (event != NEXTHOP_EVENT_DEL)
+ return NOTIFY_DONE;
- if (!nh || event != NEXTHOP_EVENT_DEL)
+ nh = nexthop_find_by_id(info->net, info->id);
+ if (!nh)
return NOTIFY_DONE;
vxlan_fdb_nh_flush(nh);
@@ -4693,10 +4699,6 @@ static int vxlan_nexthop_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block vxlan_nexthop_notifier_block __read_mostly = {
- .notifier_call = vxlan_nexthop_event,
-};
-
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -4704,11 +4706,13 @@ static __net_init int vxlan_init_net(struct net *net)
INIT_LIST_HEAD(&vn->vxlan_list);
spin_lock_init(&vn->sock_lock);
+ vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
for (h = 0; h < PORT_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vn->sock_list[h]);
- return register_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ return register_nexthop_notifier(net, &vn->nexthop_notifier_block,
+ NULL);
}
static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
@@ -4740,8 +4744,11 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- unregister_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ }
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
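
With this change each network namespace carries its own notifier block inside struct vxlan_net, register_nexthop_notifier() grows an extack argument for registration-time errors, and the handler receives a struct nh_notifier_info and resolves the nexthop by ID itself. Condensed, the new lifetime looks like this (a sketch; the sketch_* names are hypothetical):

	static __net_init int sketch_init_net(struct net *net)
	{
		struct vxlan_net *vn = net_generic(net, vxlan_net_id);

		vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
		return register_nexthop_notifier(net, &vn->nexthop_notifier_block,
						 NULL /* extack */);
	}

	static void __net_exit sketch_exit_net(struct net *net)
	{
		struct vxlan_net *vn = net_generic(net, vxlan_net_id);

		unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
	}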
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 39e5ab261d7c..4029fde71a9e 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -321,51 +321,6 @@ config IXP4XX_HSS
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.
-config DLCI
- tristate "Frame Relay DLCI support"
- help
- Support for the Frame Relay protocol.
-
- Frame Relay is a fast low-cost way to connect to a remote Internet
- access provider or to form a private wide area network. The one
- physical line from your box to the local "switch" (i.e. the entry
- point to the Frame Relay network, usually at the phone company) can
- carry several logical point-to-point connections to other computers
- connected to the Frame Relay network. For a general explanation of
- the protocol, check out <http://www.mplsforum.org/>.
-
- To use frame relay, you need supporting hardware (called FRAD) and
- certain programs from the net-tools package as explained in
- <file:Documentation/networking/framerelay.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called dlci.
-
-config DLCI_MAX
- int "Max DLCI per device"
- depends on DLCI
- default "8"
- help
- How many logical point-to-point frame relay connections (the
- identifiers of which are called DCLIs) should be handled by each
- of your hardware frame relay access devices.
-
- Go with the default.
-
-config SDLA
- tristate "SDLA (Sangoma S502/S508) support"
- depends on DLCI && ISA
- help
- Driver for the Sangoma S502A, S502E, and S508 Frame Relay Access
- Devices.
-
- These are multi-protocol cards, but only Frame Relay is supported
- by the driver at this time. Please read
- <file:Documentation/networking/framerelay.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called sdla.
-
# X.25 network drivers
config LAPBETHER
tristate "LAPB over Ethernet driver"
@@ -383,21 +338,6 @@ config LAPBETHER
If unsure, say N.
-config X25_ASY
- tristate "X.25 async driver"
- depends on LAPB && X25 && TTY
- help
- Send and receive X.25 frames over regular asynchronous serial
- lines such as telephone lines equipped with ordinary modems.
-
- Experts should note that this driver doesn't currently comply with
- the asynchronous HDLS framing protocols in CCITT recommendation X.25.
-
- To compile this driver as a module, choose M here: the
- module will be called x25_asy.
-
- If unsure, say N.
-
config SBNI
tristate "Granch SBNI12 Leased Line adapter support"
depends on X86
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 380271a011e4..081666c36ca2 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -18,12 +18,9 @@ obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
obj-$(CONFIG_COSA) += cosa.o
obj-$(CONFIG_FARSYNC) += farsync.o
-obj-$(CONFIG_X25_ASY) += x25_asy.o
obj-$(CONFIG_LANMEDIA) += lmc/
-obj-$(CONFIG_DLCI) += dlci.o
-obj-$(CONFIG_SDLA) += sdla.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_SBNI) += sbni.o
obj-$(CONFIG_N2) += n2.o
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
deleted file mode 100644
index 3ca4daf63389..000000000000
--- a/drivers/net/wan/dlci.c
+++ /dev/null
@@ -1,541 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DLCI Implementation of Frame Relay protocol for Linux, according to
- * RFC 1490. This generic device provides en/decapsulation for an
- * underlying hardware driver. Routes & IPs are assigned to these
- * interfaces. Requires 'dlcicfg' program to create usable
- * interfaces, the initial one, 'dlci' is for IOCTL use only.
- *
- * Version: @(#)dlci.c 0.35 4 Jan 1997
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- *
- * 0.15 Mike Mclagan Packet freeing, bug in kmalloc call
- * DLCI_RET handling
- * 0.20 Mike McLagan More conservative on which packets
- * are returned for retry and which are
- * are dropped. If DLCI_RET_DROP is
- * returned from the FRAD, the packet is
- * sent back to Linux for re-transmission
- * 0.25 Mike McLagan Converted to use SIOC IOCTL calls
- * 0.30 Jim Freeman Fixed to allow IPX traffic
- * 0.35 Michael Elizabeth Fixed incorrect memcpy_fromfs
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_frad.h>
-#include <linux/bitops.h>
-
-#include <net/sock.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-
-static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
-
-static LIST_HEAD(dlci_devs);
-
-static void dlci_setup(struct net_device *);
-
-/*
- * these encapsulate the RFC 1490 requirements as well as
- * deal with packet transmission and reception, working with
- * the upper network layers
- */
-
-static int dlci_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
- struct frhdr hdr;
- unsigned int hlen;
- char *dest;
-
- hdr.control = FRAD_I_UI;
- switch (type)
- {
- case ETH_P_IP:
- hdr.IP_NLPID = FRAD_P_IP;
- hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID);
- break;
-
- /* feel free to add other types, if necessary */
-
- default:
- hdr.pad = FRAD_P_PADDING;
- hdr.NLPID = FRAD_P_SNAP;
- memset(hdr.OUI, 0, sizeof(hdr.OUI));
- hdr.PID = htons(type);
- hlen = sizeof(hdr);
- break;
- }
-
- dest = skb_push(skb, hlen);
- if (!dest)
- return 0;
-
- memcpy(dest, &hdr, hlen);
-
- return hlen;
-}
-
-static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
-{
- struct frhdr *hdr;
- int process, header;
-
- if (!pskb_may_pull(skb, sizeof(*hdr))) {
- netdev_notice(dev, "invalid data no header\n");
- dev->stats.rx_errors++;
- kfree_skb(skb);
- return;
- }
-
- hdr = (struct frhdr *) skb->data;
- process = 0;
- header = 0;
- skb->dev = dev;
-
- if (hdr->control != FRAD_I_UI)
- {
- netdev_notice(dev, "Invalid header flag 0x%02X\n",
- hdr->control);
- dev->stats.rx_errors++;
- }
- else
- switch (hdr->IP_NLPID)
- {
- case FRAD_P_PADDING:
- if (hdr->NLPID != FRAD_P_SNAP)
- {
- netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
- hdr->NLPID);
- dev->stats.rx_errors++;
- break;
- }
-
- if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
- {
- netdev_notice(dev, "Unsupported organizationally unique identifier 0x%02X-%02X-%02X\n",
- hdr->OUI[0],
- hdr->OUI[1],
- hdr->OUI[2]);
- dev->stats.rx_errors++;
- break;
- }
-
- /* at this point, it's an EtherType frame */
- header = sizeof(struct frhdr);
- /* Already in network order ! */
- skb->protocol = hdr->PID;
- process = 1;
- break;
-
- case FRAD_P_IP:
- header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID);
- skb->protocol = htons(ETH_P_IP);
- process = 1;
- break;
-
- case FRAD_P_SNAP:
- case FRAD_P_Q933:
- case FRAD_P_CLNP:
- netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
- hdr->pad);
- dev->stats.rx_errors++;
- break;
-
- default:
- netdev_notice(dev, "Invalid pad byte 0x%02X\n",
- hdr->pad);
- dev->stats.rx_errors++;
- break;
- }
-
- if (process)
- {
- /* we've set up the protocol, so discard the header */
- skb_reset_mac_header(skb);
- skb_pull(skb, header);
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- dev->stats.rx_packets++;
- }
- else
- dev_kfree_skb(skb);
-}
-
-static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct dlci_local *dlp = netdev_priv(dev);
-
- if (skb) {
- struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
- netdev_start_xmit(skb, dlp->slave, txq, false);
- }
- return NETDEV_TX_OK;
-}
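-
-/*
- * Note that dlci_transmit() does no buffering of its own: the skb is
- * handed straight to the slave FRAD's transmit path via
- * netdev_start_xmit(), and NETDEV_TX_OK is always returned, leaving
- * flow control to the slave device.
- */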
-
-static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, int get)
-{
- struct dlci_conf config;
- struct dlci_local *dlp;
- struct frad_local *flp;
- int err;
-
- dlp = netdev_priv(dev);
-
- flp = netdev_priv(dlp->slave);
-
- if (!get)
- {
- if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
- return -EFAULT;
- if (config.flags & ~DLCI_VALID_FLAGS)
- return -EINVAL;
- memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
- dlp->configured = 1;
- }
-
- err = (*flp->dlci_conf)(dlp->slave, dev, get);
- if (err)
- return err;
-
- if (get)
- {
- if (copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct dlci_local *dlp;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- dlp = netdev_priv(dev);
-
- switch (cmd)
- {
- case DLCI_GET_SLAVE:
- if (!*(short *)(dev->dev_addr))
- return -EINVAL;
-
- strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
- break;
-
- case DLCI_GET_CONF:
- case DLCI_SET_CONF:
- if (!*(short *)(dev->dev_addr))
- return -EINVAL;
-
- return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int dlci_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct dlci_local *dlp = netdev_priv(dev);
-
- return dev_set_mtu(dlp->slave, new_mtu);
-}
-
-static int dlci_open(struct net_device *dev)
-{
- struct dlci_local *dlp;
- struct frad_local *flp;
- int err;
-
- dlp = netdev_priv(dev);
-
- if (!*(short *)(dev->dev_addr))
- return -EINVAL;
-
- if (!netif_running(dlp->slave))
- return -ENOTCONN;
-
- flp = netdev_priv(dlp->slave);
- err = (*flp->activate)(dlp->slave, dev);
- if (err)
- return err;
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int dlci_close(struct net_device *dev)
-{
- struct dlci_local *dlp;
- struct frad_local *flp;
-
- netif_stop_queue(dev);
-
- dlp = netdev_priv(dev);
-
- flp = netdev_priv(dlp->slave);
- (*flp->deactivate)(dlp->slave, dev);
-
- return 0;
-}
-
-static int dlci_add(struct dlci_add *dlci)
-{
- struct net_device *master, *slave;
- struct dlci_local *dlp;
- struct frad_local *flp;
- int err = -EINVAL;
-
-
- /* validate slave device */
- slave = dev_get_by_name(&init_net, dlci->devname);
- if (!slave)
- return -ENODEV;
-
- if (slave->type != ARPHRD_FRAD || netdev_priv(slave) == NULL)
- goto err1;
-
- /* create device name */
- master = alloc_netdev(sizeof(struct dlci_local), "dlci%d",
- NET_NAME_UNKNOWN, dlci_setup);
- if (!master) {
- err = -ENOMEM;
- goto err1;
- }
-
- /* make sure same slave not already registered */
- rtnl_lock();
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->slave == slave) {
- err = -EBUSY;
- goto err2;
- }
- }
-
- *(short *)(master->dev_addr) = dlci->dlci;
-
- dlp = netdev_priv(master);
- dlp->slave = slave;
- dlp->master = master;
-
- flp = netdev_priv(slave);
- err = (*flp->assoc)(slave, master);
- if (err < 0)
- goto err2;
-
- err = register_netdevice(master);
- if (err < 0)
- goto err2;
-
- strcpy(dlci->devname, master->name);
-
- list_add(&dlp->list, &dlci_devs);
- rtnl_unlock();
-
- return 0;
-
- err2:
- rtnl_unlock();
- free_netdev(master);
- err1:
- dev_put(slave);
- return err;
-}
-
-static int dlci_del(struct dlci_add *dlci)
-{
- struct dlci_local *dlp;
- struct frad_local *flp;
- struct net_device *master, *slave;
- int err;
- bool found = false;
-
- rtnl_lock();
-
- /* validate slave device */
- master = __dev_get_by_name(&init_net, dlci->devname);
- if (!master) {
- err = -ENODEV;
- goto out;
- }
-
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->master == master) {
- found = true;
- break;
- }
- }
- if (!found) {
- err = -ENODEV;
- goto out;
- }
-
- if (netif_running(master)) {
- err = -EBUSY;
- goto out;
- }
-
- dlp = netdev_priv(master);
- slave = dlp->slave;
- flp = netdev_priv(slave);
-
- err = (*flp->deassoc)(slave, master);
- if (!err) {
- list_del(&dlp->list);
-
- unregister_netdevice(master);
-
- dev_put(slave);
- }
-out:
- rtnl_unlock();
- return err;
-}
-
-static int dlci_ioctl(unsigned int cmd, void __user *arg)
-{
- struct dlci_add add;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
- return -EFAULT;
-
- switch (cmd)
- {
- case SIOCADDDLCI:
- err = dlci_add(&add);
-
- if (!err)
- if (copy_to_user(arg, &add, sizeof(struct dlci_add)))
- return -EFAULT;
- break;
-
- case SIOCDELDLCI:
- err = dlci_del(&add);
- break;
-
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
-static const struct header_ops dlci_header_ops = {
- .create = dlci_header,
-};
-
-static const struct net_device_ops dlci_netdev_ops = {
- .ndo_open = dlci_open,
- .ndo_stop = dlci_close,
- .ndo_do_ioctl = dlci_dev_ioctl,
- .ndo_start_xmit = dlci_transmit,
- .ndo_change_mtu = dlci_change_mtu,
-};
-
-static void dlci_setup(struct net_device *dev)
-{
- struct dlci_local *dlp = netdev_priv(dev);
-
- dev->flags = 0;
- dev->header_ops = &dlci_header_ops;
- dev->netdev_ops = &dlci_netdev_ops;
- dev->needs_free_netdev = true;
-
- dlp->receive = dlci_receive;
-
- dev->type = ARPHRD_DLCI;
- dev->hard_header_len = sizeof(struct frhdr);
- dev->addr_len = sizeof(short);
-
-}
-
-/* if slave is unregistering, then cleanup master */
-static int dlci_dev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (dev_net(dev) != &init_net)
- return NOTIFY_DONE;
-
- if (event == NETDEV_UNREGISTER) {
- struct dlci_local *dlp;
-
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->slave == dev) {
- list_del(&dlp->list);
- unregister_netdevice(dlp->master);
- dev_put(dlp->slave);
- break;
- }
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block dlci_notifier = {
- .notifier_call = dlci_dev_event,
-};
-
-static int __init init_dlci(void)
-{
- dlci_ioctl_set(dlci_ioctl);
- register_netdevice_notifier(&dlci_notifier);
-
- printk("%s.\n", version);
-
- return 0;
-}
-
-static void __exit dlci_exit(void)
-{
- struct dlci_local *dlp, *nxt;
-
- dlci_ioctl_set(NULL);
- unregister_netdevice_notifier(&dlci_notifier);
-
- rtnl_lock();
- list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) {
- unregister_netdevice(dlp->master);
- dev_put(dlp->slave);
- }
- rtnl_unlock();
-}
-
-module_init(init_dlci);
-module_exit(dlci_exit);
-
-MODULE_AUTHOR("Mike McLagan");
-MODULE_DESCRIPTION("Frame Relay DLCI layer");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 409e5a7ad8e2..0720f5f92caa 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -871,6 +871,45 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
return 0;
}
+static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc)
+{
+ /* OUI 00-00-00 indicates an Ethertype follows */
+ if (skb->data[0] == 0x00 &&
+ skb->data[1] == 0x00 &&
+ skb->data[2] == 0x00) {
+ if (!pvc->main)
+ return -1;
+ skb->dev = pvc->main;
+ skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */
+ skb_pull(skb, 5);
+ skb_reset_mac_header(skb);
+ return 0;
+
+ /* OUI 00-80-C2 stands for the 802.1 organization */
+ } else if (skb->data[0] == 0x00 &&
+ skb->data[1] == 0x80 &&
+ skb->data[2] == 0xC2) {
+ /* PID 00-07 stands for Ethernet frames without FCS */
+ if (skb->data[3] == 0x00 &&
+ skb->data[4] == 0x07) {
+ if (!pvc->ether)
+ return -1;
+ skb_pull(skb, 5);
+ if (skb->len < ETH_HLEN)
+ return -1;
+ skb->protocol = eth_type_trans(skb, pvc->ether);
+ return 0;
+
+ /* PID unsupported */
+ } else {
+ return -1;
+ }
+
+ /* OUI unsupported */
+ } else {
+ return -1;
+ }
+}
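+
+/*
+ * For reference (illustrative, not part of the parsing logic): when
+ * fr_snap_parse() runs, the Q.922 address, UI, pad and NLPID bytes
+ * have already been pulled, so skb->data points at the SNAP header:
+ *
+ *   OUI[3]    PID[2]     meaning
+ *   00-00-00  EtherType  routed frame, delivered to pvc->main
+ *   00-80-C2  00-07      bridged Ethernet frame w/o FCS, to pvc->ether
+ *   other                unsupported, the caller drops the frame
+ */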
static int fr_rx(struct sk_buff *skb)
{
@@ -880,9 +919,9 @@ static int fr_rx(struct sk_buff *skb)
u8 *data = skb->data;
u16 dlci;
struct pvc_device *pvc;
- struct net_device *dev = NULL;
+ struct net_device *dev;
- if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
+ if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI)
goto rx_error;
dlci = q922_to_dlci(skb->data);
@@ -904,8 +943,7 @@ static int fr_rx(struct sk_buff *skb)
netdev_info(frad, "No PVC for received frame's DLCI %d\n",
dlci);
#endif
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ goto rx_drop;
}
if (pvc->state.fecn != fh->fecn) {
@@ -931,63 +969,51 @@ static int fr_rx(struct sk_buff *skb)
}
if (data[3] == NLPID_IP) {
+ if (!pvc->main)
+ goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
- dev = pvc->main;
+ skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IP);
+ skb_reset_mac_header(skb);
} else if (data[3] == NLPID_IPV6) {
+ if (!pvc->main)
+ goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
- dev = pvc->main;
+ skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IPV6);
+ skb_reset_mac_header(skb);
- } else if (skb->len > 10 && data[3] == FR_PAD &&
- data[4] == NLPID_SNAP && data[5] == FR_PAD) {
- u16 oui = ntohs(*(__be16*)(data + 6));
- u16 pid = ntohs(*(__be16*)(data + 8));
- skb_pull(skb, 10);
-
- switch ((((u32)oui) << 16) | pid) {
- case ETH_P_ARP: /* routed frame with SNAP */
- case ETH_P_IPX:
- case ETH_P_IP: /* a long variant */
- case ETH_P_IPV6:
- dev = pvc->main;
- skb->protocol = htons(pid);
- break;
-
- case 0x80C20007: /* bridged Ethernet frame */
- if ((dev = pvc->ether) != NULL)
- skb->protocol = eth_type_trans(skb, dev);
- break;
-
- default:
- netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
- oui, pid);
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ } else if (data[3] == FR_PAD) {
+ if (skb->len < 5)
+ goto rx_error;
+ if (data[4] == NLPID_SNAP) { /* A SNAP header follows */
+ skb_pull(skb, 5);
+ if (skb->len < 5) /* Incomplete SNAP header */
+ goto rx_error;
+ if (fr_snap_parse(skb, pvc))
+ goto rx_drop;
+ } else {
+ goto rx_drop;
}
+
} else {
netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
data[3], skb->len);
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ goto rx_drop;
}
- if (dev) {
- dev->stats.rx_packets++; /* PVC traffic */
- dev->stats.rx_bytes += skb->len;
- if (pvc->state.becn)
- dev->stats.rx_compressed++;
- skb->dev = dev;
- netif_rx(skb);
- return NET_RX_SUCCESS;
- } else {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ dev = skb->dev;
+ dev->stats.rx_packets++; /* PVC traffic */
+ dev->stats.rx_bytes += skb->len;
+ if (pvc->state.becn)
+ dev->stats.rx_compressed++;
+ netif_rx(skb);
+ return NET_RX_SUCCESS;
- rx_error:
+rx_error:
frad->stats.rx_errors++; /* Mark error */
+rx_drop:
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 36600b0a0ab0..93c7e8502845 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -353,9 +353,8 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
switch(xc.command){
case lmc_xilinx_reset: /*fold02*/
{
- u16 mii;
spin_lock_irqsave(&sc->lmc_lock, flags);
- mii = lmc_mii_readreg (sc, 0, 16);
+ lmc_mii_readreg (sc, 0, 16);
/*
* Make all of them 0 and make input
@@ -424,10 +423,9 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
case lmc_xilinx_load_prom: /*fold02*/
{
- u16 mii;
int timeout = 500000;
spin_lock_irqsave(&sc->lmc_lock, flags);
- mii = lmc_mii_readreg (sc, 0, 16);
+ lmc_mii_readreg (sc, 0, 16);
/*
* Make all of them 0 and make input
@@ -1185,7 +1183,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
int i;
s32 stat;
unsigned int badtx;
- u32 firstcsr;
int max_work = LMC_RXDESCS;
int handled = 0;
@@ -1203,8 +1200,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
goto lmc_int_fail_out;
}
- firstcsr = csr;
-
/* always go through this loop at least once */
while (csr & sc->lmc_intrmask) {
handled = 1;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
deleted file mode 100644
index bc2c1c7fb1a4..000000000000
--- a/drivers/net/wan/sdla.c
+++ /dev/null
@@ -1,1655 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SDLA An implementation of a driver for the Sangoma S502/S508 series
- * multi-protocol PC interface card. Initial offering is with
- * the DLCI driver, providing Frame Relay support for Linux.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)sdla.c 0.30 12 Sep 1996
- *
- * Credits: Sangoma Technologies, for the use of 2 cards for an extended
- * period of time.
- * David Mandelstam <dm@sangoma.com> for getting me started on
- * this project, and incentive to complete it.
- * Gene Kozen <74604.152@compuserve.com> for providing me with
- * important information about the cards.
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Improved error handling, packet dropping
- * 0.20 Mike McLagan New transmit/receive flags for config
- * If in FR mode, don't accept packets from
- * non DLCI devices.
- * 0.25 Mike McLagan Fixed problem with rejecting packets
- * from non DLCI devices.
- * 0.30 Mike McLagan Fixed kernel panic when used with modified
- * ifconfig
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_frad.h>
-#include <linux/sdla.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-
-static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
-
-static unsigned int valid_port[] = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390};
-
-static unsigned int valid_mem[] = {
- 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
- 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
- 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
- 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
- 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000};
-
-static DEFINE_SPINLOCK(sdla_lock);
-
-/*********************************************************
- *
- * these are the core routines that access the card itself
- *
- *********************************************************/
-
-#define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW)
-
-static void __sdla_read(struct net_device *dev, int addr, void *buf, short len)
-{
- char *temp;
- const void *base;
- int offset, bytes;
-
- temp = buf;
- while(len)
- {
- offset = addr & SDLA_ADDR_MASK;
- bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
- base = (const void *) (dev->mem_start + offset);
-
- SDLA_WINDOW(dev, addr);
- memcpy(temp, base, bytes);
-
- addr += bytes;
- temp += bytes;
- len -= bytes;
- }
-}
-
-static void sdla_read(struct net_device *dev, int addr, void *buf, short len)
-{
- unsigned long flags;
- spin_lock_irqsave(&sdla_lock, flags);
- __sdla_read(dev, addr, buf, len);
- spin_unlock_irqrestore(&sdla_lock, flags);
-}
-
-static void __sdla_write(struct net_device *dev, int addr,
- const void *buf, short len)
-{
- const char *temp;
- void *base;
- int offset, bytes;
-
- temp = buf;
- while(len)
- {
- offset = addr & SDLA_ADDR_MASK;
- bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
- base = (void *) (dev->mem_start + offset);
-
- SDLA_WINDOW(dev, addr);
- memcpy(base, temp, bytes);
-
- addr += bytes;
- temp += bytes;
- len -= bytes;
- }
-}
-
-static void sdla_write(struct net_device *dev, int addr,
- const void *buf, short len)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sdla_lock, flags);
- __sdla_write(dev, addr, buf, len);
- spin_unlock_irqrestore(&sdla_lock, flags);
-}
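-
-/*
- * A worked example of the windowing above (assuming the usual 8 KiB
- * window, i.e. SDLA_WINDOW_SIZE == 0x2000 and SDLA_ADDR_MASK ==
- * 0x1FFF): a 16-byte read from card address 0x1FF8 is split into two
- * chunks -- 8 bytes from offset 0x1FF8 while window 0 is mapped,
- * then, after SDLA_WINDOW() banks in window 1 for address 0x2000,
- * the remaining 8 bytes from offset 0x0000.
- */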
-
-
-static void sdla_clear(struct net_device *dev)
-{
- unsigned long flags;
- char *base;
- int len, addr, bytes;
-
- len = 65536;
- addr = 0;
- bytes = SDLA_WINDOW_SIZE;
- base = (void *) dev->mem_start;
-
- spin_lock_irqsave(&sdla_lock, flags);
- while(len)
- {
- SDLA_WINDOW(dev, addr);
- memset(base, 0, bytes);
-
- addr += bytes;
- len -= bytes;
- }
- spin_unlock_irqrestore(&sdla_lock, flags);
-
-}
-
-static char sdla_byte(struct net_device *dev, int addr)
-{
- unsigned long flags;
- char byte, *temp;
-
- temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK));
-
- spin_lock_irqsave(&sdla_lock, flags);
- SDLA_WINDOW(dev, addr);
- byte = *temp;
- spin_unlock_irqrestore(&sdla_lock, flags);
-
- return byte;
-}
-
-static void sdla_stop(struct net_device *dev)
-{
- struct frad_local *flp;
-
- flp = netdev_priv(dev);
- switch(flp->type)
- {
- case SDLA_S502A:
- outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = SDLA_HALT;
- break;
- case SDLA_S502E:
- outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL);
- outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = SDLA_S502E_ENABLE;
- break;
- case SDLA_S507:
- flp->state &= ~SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- case SDLA_S508:
- flp->state &= ~SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- }
-}
-
-static void sdla_start(struct net_device *dev)
-{
- struct frad_local *flp;
-
- flp = netdev_priv(dev);
- switch(flp->type)
- {
- case SDLA_S502A:
- outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL);
- outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = SDLA_S502A_START;
- break;
- case SDLA_S502E:
- outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL);
- outb(0x00, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = 0;
- break;
- case SDLA_S507:
- flp->state |= SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- case SDLA_S508:
- flp->state |= SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- }
-}
-
-/****************************************************
- *
- * this is used for the S502A/E cards to determine
- * the speed of the onboard CPU. Calibration is
- * necessary for the Frame Relay code uploaded
- * later. Incorrect results cause timing problems
- * with link checks & status messages
- *
- ***************************************************/
-
-static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
-{
- unsigned long start, done, now;
- char resp, *temp;
-
- start = now = jiffies;
- done = jiffies + jiffs;
-
- temp = (void *)dev->mem_start;
- temp += z80_addr & SDLA_ADDR_MASK;
-
- resp = ~resp1;
- while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2)))
- {
- if (jiffies != now)
- {
- SDLA_WINDOW(dev, z80_addr);
- now = jiffies;
- resp = *temp;
- }
- }
- return time_before(jiffies, done) ? jiffies - start : -1;
-}
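-
-/*
- * Return value note: on success sdla_z80_poll() returns the number of
- * jiffies the Z80 took to respond, which sdla_cpuspeed() below uses
- * to classify the board; on timeout it returns -1.
- */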
-
-/* constants for Z80 CPU speed */
-#define Z80_READY '1' /* Z80 is ready to begin */
-#define LOADER_READY '2' /* driver is ready to begin */
-#define Z80_SCC_OK '3' /* SCC is on board */
-#define Z80_SCC_BAD '4' /* SCC was not found */
-
-static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
-{
- int jiffs;
- char data;
-
- sdla_start(dev);
- if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
- return -EIO;
-
- data = LOADER_READY;
- sdla_write(dev, 0, &data, 1);
-
- if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
- return -EIO;
-
- sdla_stop(dev);
- sdla_read(dev, 0, &data, 1);
-
- if (data == Z80_SCC_BAD)
- {
- printk("%s: SCC bad\n", dev->name);
- return -EIO;
- }
-
- if (data != Z80_SCC_OK)
- return -EINVAL;
-
- if (jiffs < 165)
- ifr->ifr_mtu = SDLA_CPU_16M;
- else if (jiffs < 220)
- ifr->ifr_mtu = SDLA_CPU_10M;
- else if (jiffs < 258)
- ifr->ifr_mtu = SDLA_CPU_8M;
- else if (jiffs < 357)
- ifr->ifr_mtu = SDLA_CPU_7M;
- else if (jiffs < 467)
- ifr->ifr_mtu = SDLA_CPU_5M;
- else
- ifr->ifr_mtu = SDLA_CPU_3M;
-
- return 0;
-}
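-
-/*
- * Worked example of the calibration above (assuming HZ == 100, as on
- * the ISA systems these cards shipped in): a board that answers the
- * SCC probe after roughly 2 seconds yields jiffs of about 200, which
- * falls in the 165..219 band and reports SDLA_CPU_10M; one answering
- * in under 1.65 seconds lands below 165 and reports SDLA_CPU_16M.
- */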
-
-/************************************************
- *
- * Direct interaction with the Frame Relay code
- * starts here.
- *
- ************************************************/
-
-struct _dlci_stat
-{
- short dlci;
- char flags;
-} __packed;
-
-struct _frad_stat
-{
- char flags;
- struct _dlci_stat dlcis[SDLA_MAX_DLCI];
-};
-
-static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data)
-{
- struct _dlci_stat *pstatus;
- short *pdlci;
- int i;
- char *state, line[30];
-
- switch (ret)
- {
- case SDLA_RET_MODEM:
- state = data;
- if (*state & SDLA_MODEM_DCD_LOW)
- netdev_info(dev, "Modem DCD unexpectedly low!\n");
- if (*state & SDLA_MODEM_CTS_LOW)
- netdev_info(dev, "Modem CTS unexpectedly low!\n");
- /* I should probably do something about this! */
- break;
-
- case SDLA_RET_CHANNEL_OFF:
- netdev_info(dev, "Channel became inoperative!\n");
- /* same here */
- break;
-
- case SDLA_RET_CHANNEL_ON:
- netdev_info(dev, "Channel became operative!\n");
- /* same here */
- break;
-
- case SDLA_RET_DLCI_STATUS:
- netdev_info(dev, "Status change reported by Access Node\n");
- len /= sizeof(struct _dlci_stat);
- for(pstatus = data, i=0;i < len;i++,pstatus++)
- {
- if (pstatus->flags & SDLA_DLCI_NEW)
- state = "new";
- else if (pstatus->flags & SDLA_DLCI_DELETED)
- state = "deleted";
- else if (pstatus->flags & SDLA_DLCI_ACTIVE)
- state = "active";
- else
- {
- sprintf(line, "unknown status: %02X", pstatus->flags);
- state = line;
- }
- netdev_info(dev, "DLCI %i: %s\n",
- pstatus->dlci, state);
- /* same here */
- }
- break;
-
- case SDLA_RET_DLCI_UNKNOWN:
- netdev_info(dev, "Received unknown DLCIs:");
- len /= sizeof(short);
- for(pdlci = data,i=0;i < len;i++,pdlci++)
- pr_cont(" %i", *pdlci);
- pr_cont("\n");
- break;
-
- case SDLA_RET_TIMEOUT:
- netdev_err(dev, "Command timed out!\n");
- break;
-
- case SDLA_RET_BUF_OVERSIZE:
- netdev_info(dev, "Bc/CIR overflow, acceptable size is %i\n",
- len);
- break;
-
- case SDLA_RET_BUF_TOO_BIG:
- netdev_info(dev, "Buffer size over specified max of %i\n",
- len);
- break;
-
- case SDLA_RET_CHANNEL_INACTIVE:
- case SDLA_RET_DLCI_INACTIVE:
- case SDLA_RET_CIR_OVERFLOW:
- case SDLA_RET_NO_BUFS:
- if (cmd == SDLA_INFORMATION_WRITE)
- break;
- fallthrough;
-
- default:
- netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
- cmd, ret);
- /* Further processing could be done here */
- break;
- }
-}
-
-static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
- void *inbuf, short inlen, void *outbuf, short *outlen)
-{
- static struct _frad_stat status;
- struct frad_local *flp;
- struct sdla_cmd *cmd_buf;
- unsigned long pflags;
- unsigned long jiffs;
- int ret, waiting, len;
- long window;
-
- flp = netdev_priv(dev);
- window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
- cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
- ret = 0;
- len = 0;
- jiffs = jiffies + HZ; /* 1 second is plenty */
-
- spin_lock_irqsave(&sdla_lock, pflags);
- SDLA_WINDOW(dev, window);
- cmd_buf->cmd = cmd;
- cmd_buf->dlci = dlci;
- cmd_buf->flags = flags;
-
- if (inbuf)
- memcpy(cmd_buf->data, inbuf, inlen);
-
- cmd_buf->length = inlen;
-
- cmd_buf->opp_flag = 1;
- spin_unlock_irqrestore(&sdla_lock, pflags);
-
- waiting = 1;
- len = 0;
- while (waiting && time_before_eq(jiffies, jiffs))
- {
- if (waiting++ % 3)
- {
- spin_lock_irqsave(&sdla_lock, pflags);
- SDLA_WINDOW(dev, window);
- waiting = ((volatile int)(cmd_buf->opp_flag));
- spin_unlock_irqrestore(&sdla_lock, pflags);
- }
- }
-
- if (!waiting)
- {
-
- spin_lock_irqsave(&sdla_lock, pflags);
- SDLA_WINDOW(dev, window);
- ret = cmd_buf->retval;
- len = cmd_buf->length;
- if (outbuf && outlen)
- {
- *outlen = *outlen >= len ? len : *outlen;
-
- if (*outlen)
- memcpy(outbuf, cmd_buf->data, *outlen);
- }
-
- /* This is a local copy that's used for error handling */
- if (ret)
- memcpy(&status, cmd_buf->data, len > sizeof(status) ? sizeof(status) : len);
-
- spin_unlock_irqrestore(&sdla_lock, pflags);
- }
- else
- ret = SDLA_RET_TIMEOUT;
-
- if (ret != SDLA_RET_OK)
- sdla_errors(dev, cmd, dlci, ret, len, &status);
-
- return ret;
-}
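-
-/*
- * To summarize the mailbox handshake implemented above: the host owns
- * the shared command buffer while opp_flag is 0; it fills in
- * cmd/dlci/flags/data and hands the buffer to the Z80 firmware by
- * setting opp_flag to 1.  The firmware clears opp_flag once
- * retval/length/data hold the response, and a command still flagged
- * after one second is reported as SDLA_RET_TIMEOUT.
- */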
-
-/***********************************************
- *
- * these functions are called by the DLCI driver
- *
- ***********************************************/
-
-static int sdla_reconfig(struct net_device *dev);
-
-static int sdla_activate(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- flp->dlci[i] = abs(flp->dlci[i]);
-
- if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
- sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
-
- return 0;
-}
-
-static int sdla_deactivate(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- flp->dlci[i] = -abs(flp->dlci[i]);
-
- if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
- sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
-
- return 0;
-}
-
-static int sdla_assoc(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- if (master->type != ARPHRD_DLCI)
- return -EINVAL;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- {
- if (!flp->master[i])
- break;
- if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
- return -EADDRINUSE;
- }
-
- if (i == CONFIG_DLCI_MAX)
- return -EMLINK; /* #### Alan: Comments on this ?? */
-
-
- flp->master[i] = master;
- flp->dlci[i] = -*(short *)(master->dev_addr);
- master->mtu = slave->mtu;
-
- if (netif_running(slave)) {
- if (flp->config.station == FRAD_STATION_CPE)
- sdla_reconfig(slave);
- else
- sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
- }
-
- return 0;
-}
-
-static int sdla_deassoc(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- flp->master[i] = NULL;
- flp->dlci[i] = 0;
-
-
- if (netif_running(slave)) {
- if (flp->config.station == FRAD_STATION_CPE)
- sdla_reconfig(slave);
- else
- sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
- }
-
- return 0;
-}
-
-static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
-{
- struct frad_local *flp;
- struct dlci_local *dlp;
- int i;
- short len, ret;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- dlp = netdev_priv(master);
-
- ret = SDLA_RET_OK;
- len = sizeof(struct dlci_conf);
- if (netif_running(slave)) {
- if (get)
- ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
- NULL, 0, &dlp->config, &len);
- else
- ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
- &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
- }
-
- return ret == SDLA_RET_OK ? 0 : -EIO;
-}
-
-/**************************
- *
- * now for the Linux driver
- *
- **************************/
-
-/* NOTE: the DLCI driver deals with freeing the SKB!! */
-static netdev_tx_t sdla_transmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct frad_local *flp;
- int ret, addr, accept, i;
- short size;
- unsigned long flags;
- struct buf_entry *pbuf;
-
- flp = netdev_priv(dev);
- ret = 0;
- accept = 1;
-
- netif_stop_queue(dev);
-
- /*
- * stupid GateD insists on setting up the multicast router through us,
- * and we're ill-equipped to handle a non-Frame-Relay packet at this
- * time!
- */
-
- accept = 1;
- switch (dev->type)
- {
- case ARPHRD_FRAD:
- if (skb->dev->type != ARPHRD_DLCI)
- {
- netdev_warn(dev, "Non DLCI device, type %i, tried to send on FRAD module\n",
- skb->dev->type);
- accept = 0;
- }
- break;
- default:
- netdev_warn(dev, "unknown firmware type 0x%04X\n",
- dev->type);
- accept = 0;
- break;
- }
- if (accept)
- {
- /* this is frame specific, but till there's a PPP module, it's the default */
- switch (flp->type)
- {
- case SDLA_S502A:
- case SDLA_S502E:
- ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL);
- break;
- case SDLA_S508:
- size = sizeof(addr);
- ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size);
- if (ret == SDLA_RET_OK)
- {
-
- spin_lock_irqsave(&sdla_lock, flags);
- SDLA_WINDOW(dev, addr);
- pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
- __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
- SDLA_WINDOW(dev, addr);
- pbuf->opp_flag = 1;
- spin_unlock_irqrestore(&sdla_lock, flags);
- }
- break;
- }
-
- switch (ret)
- {
- case SDLA_RET_OK:
- dev->stats.tx_packets++;
- break;
-
- case SDLA_RET_CIR_OVERFLOW:
- case SDLA_RET_BUF_OVERSIZE:
- case SDLA_RET_NO_BUFS:
- dev->stats.tx_dropped++;
- break;
-
- default:
- dev->stats.tx_errors++;
- break;
- }
- }
- netif_wake_queue(dev);
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- {
- if(flp->master[i]!=NULL)
- netif_wake_queue(flp->master[i]);
- }
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static void sdla_receive(struct net_device *dev)
-{
- struct net_device *master;
- struct frad_local *flp;
- struct dlci_local *dlp;
- struct sk_buff *skb;
-
- struct sdla_cmd *cmd;
- struct buf_info *pbufi;
- struct buf_entry *pbuf;
-
- unsigned long flags;
- int i=0, received, success, addr, buf_base, buf_top;
- short dlci, len, len2, split;
-
- flp = netdev_priv(dev);
- success = 1;
- received = addr = buf_top = buf_base = 0;
- len = dlci = 0;
- skb = NULL;
- master = NULL;
- cmd = NULL;
- pbufi = NULL;
- pbuf = NULL;
-
- spin_lock_irqsave(&sdla_lock, flags);
-
- switch (flp->type)
- {
- case SDLA_S502A:
- case SDLA_S502E:
- cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK));
- SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
- success = cmd->opp_flag;
- if (!success)
- break;
-
- dlci = cmd->dlci;
- len = cmd->length;
- break;
-
- case SDLA_S508:
- pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK));
- SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
- pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK));
- success = pbuf->opp_flag;
- if (!success)
- break;
-
- buf_top = pbufi->buf_top;
- buf_base = pbufi->buf_base;
- dlci = pbuf->dlci;
- len = pbuf->length;
- addr = pbuf->buf_addr;
- break;
- }
-
- /* common code, find the DLCI and get the SKB */
- if (success)
- {
- for (i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i] == dlci)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- {
- netdev_notice(dev, "Received packet from invalid DLCI %i, ignoring\n",
- dlci);
- dev->stats.rx_errors++;
- success = 0;
- }
- }
-
- if (success)
- {
- master = flp->master[i];
- skb = dev_alloc_skb(len + sizeof(struct frhdr));
- if (skb == NULL)
- {
- netdev_notice(dev, "Memory squeeze, dropping packet\n");
- dev->stats.rx_dropped++;
- success = 0;
- }
- else
- skb_reserve(skb, sizeof(struct frhdr));
- }
-
- /* pick up the data */
- switch (flp->type)
- {
- case SDLA_S502A:
- case SDLA_S502E:
- if (success)
- __sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len);
-
- SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
- cmd->opp_flag = 0;
- break;
-
- case SDLA_S508:
- if (success)
- {
- /* is this buffer split off the end of the internal ring buffer */
- split = addr + len > buf_top + 1 ? len - (buf_top - addr + 1) : 0;
- len2 = len - split;
-
- __sdla_read(dev, addr, skb_put(skb, len2), len2);
- if (split)
- __sdla_read(dev, buf_base, skb_put(skb, split), split);
- }
-
- /* increment the buffer we're looking at */
- SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
- flp->buffer = (flp->buffer + 1) % pbufi->rse_num;
- pbuf->opp_flag = 0;
- break;
- }
-
- if (success)
- {
- dev->stats.rx_packets++;
- dlp = netdev_priv(master);
- (*dlp->receive)(skb, master);
- }
-
- spin_unlock_irqrestore(&sdla_lock, flags);
-}
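-
-/*
- * Worked example of the S508 ring-buffer split above (addresses
- * illustrative; the real values come from the card's buf_info block):
- * with buf_base == 0x4000 and buf_top == 0x5FFF, a 300-byte frame
- * starting at addr == 0x5F80 overruns the top by 172 bytes, so
- * len2 == 128 bytes are read from 0x5F80 and the remaining
- * split == 172 bytes from buf_base.
- */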
-
-static irqreturn_t sdla_isr(int dummy, void *dev_id)
-{
- struct net_device *dev;
- struct frad_local *flp;
- char byte;
-
- dev = dev_id;
-
- flp = netdev_priv(dev);
-
- if (!flp->initialized)
- {
- netdev_warn(dev, "irq %d for uninitialized device\n", dev->irq);
- return IRQ_NONE;
- }
-
- byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE);
- switch (byte)
- {
- case SDLA_INTR_RX:
- sdla_receive(dev);
- break;
-
- /* the command will get an error return, which is processed above */
- case SDLA_INTR_MODEM:
- case SDLA_INTR_STATUS:
- sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL);
- break;
-
- case SDLA_INTR_TX:
- case SDLA_INTR_COMPLETE:
- case SDLA_INTR_TIMER:
- netdev_warn(dev, "invalid irq flag 0x%02X\n", byte);
- break;
- }
-
- /* the S502E requires a manual acknowledgement of the interrupt */
- if (flp->type == SDLA_S502E)
- {
- flp->state &= ~SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- flp->state |= SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- }
-
- /* this clears the byte, informing the Z80 we're done */
- byte = 0;
- sdla_write(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
- return IRQ_HANDLED;
-}
-
-static void sdla_poll(struct timer_list *t)
-{
- struct frad_local *flp = from_timer(flp, t, timer);
- struct net_device *dev = flp->dev;
-
- if (sdla_byte(dev, SDLA_502_RCV_BUF))
- sdla_receive(dev);
-
- flp->timer.expires = 1;
- add_timer(&flp->timer);
-}
-
-static int sdla_close(struct net_device *dev)
-{
- struct frad_local *flp;
- struct intr_info intr;
- int len, i;
- short dlcis[CONFIG_DLCI_MAX];
-
- flp = netdev_priv(dev);
-
- len = 0;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- dlcis[len++] = abs(flp->dlci[i]);
- len *= 2;
-
- if (flp->config.station == FRAD_STATION_NODE)
- {
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i] > 0)
- sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL);
- sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, &flp->dlci[i], sizeof(flp->dlci[i]), NULL, NULL);
- }
-
- memset(&intr, 0, sizeof(intr));
- /* let's shut down the reception */
- switch(flp->type)
- {
- case SDLA_S502A:
- del_timer(&flp->timer);
- break;
-
- case SDLA_S502E:
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
- flp->state &= ~SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
-
- case SDLA_S507:
- break;
-
- case SDLA_S508:
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
- flp->state &= ~SDLA_S508_INTEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- }
-
- sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
-
- netif_stop_queue(dev);
-
- return 0;
-}
-
-struct conf_data {
- struct frad_conf config;
- short dlci[CONFIG_DLCI_MAX];
-};
-
-static int sdla_open(struct net_device *dev)
-{
- struct frad_local *flp;
- struct dlci_local *dlp;
- struct conf_data data;
- struct intr_info intr;
- int len, i;
- char byte;
-
- flp = netdev_priv(dev);
-
- if (!flp->initialized)
- return -EPERM;
-
- if (!flp->configured)
- return -EPERM;
-
- /* time to send in the configuration */
- len = 0;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- data.dlci[len++] = abs(flp->dlci[i]);
- len *= 2;
-
- memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
- len += sizeof(struct frad_conf);
-
- sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
- sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
-
- if (flp->type == SDLA_S508)
- flp->buffer = 0;
-
- sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
-
- /* let's start up the reception */
- memset(&intr, 0, sizeof(intr));
- switch(flp->type)
- {
- case SDLA_S502A:
- flp->timer.expires = 1;
- add_timer(&flp->timer);
- break;
-
- case SDLA_S502E:
- flp->state |= SDLA_S502E_ENABLE;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- flp->state |= SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- byte = 0;
- sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
- intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
- break;
-
- case SDLA_S507:
- break;
-
- case SDLA_S508:
- flp->state |= SDLA_S508_INTEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- byte = 0;
- sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte));
- intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
- intr.irq = dev->irq;
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
- break;
- }
-
- if (flp->config.station == FRAD_STATION_CPE)
- {
- byte = SDLA_ICS_STATUS_ENQ;
- sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL);
- }
- else
- {
- sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL);
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i] > 0)
- sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL);
- }
-
- /* configure any specific DLCI settings */
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- {
- dlp = netdev_priv(flp->master[i]);
- if (dlp->configured)
- sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
- }
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
-{
- struct frad_local *flp;
- struct conf_data data;
- int i;
- short size;
-
- if (dev->type == 0xFFFF)
- return -EUNATCH;
-
- flp = netdev_priv(dev);
-
- if (!get)
- {
- if (netif_running(dev))
- return -EBUSY;
-
- if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
- return -EFAULT;
-
- if (data.config.station & ~FRAD_STATION_NODE)
- return -EINVAL;
-
- if (data.config.flags & ~FRAD_VALID_FLAGS)
- return -EINVAL;
-
- if ((data.config.kbaud < 0) ||
- ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
- return -EINVAL;
-
- if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
- return -EINVAL;
-
- if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
- return -EINVAL;
-
- if ((data.config.T391 < 5) || (data.config.T391 > 30))
- return -EINVAL;
-
- if ((data.config.T392 < 5) || (data.config.T392 > 30))
- return -EINVAL;
-
- if ((data.config.N391 < 1) || (data.config.N391 > 255))
- return -EINVAL;
-
- if ((data.config.N392 < 1) || (data.config.N392 > 10))
- return -EINVAL;
-
- if ((data.config.N393 < 1) || (data.config.N393 > 10))
- return -EINVAL;
-
- memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
- flp->config.flags |= SDLA_DIRECT_RECV;
-
- if (flp->type == SDLA_S508)
- flp->config.flags |= SDLA_TX70_RX30;
-
- if (dev->mtu != flp->config.mtu)
- {
- /* this is required to change the MTU */
- dev->mtu = flp->config.mtu;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i])
- flp->master[i]->mtu = flp->config.mtu;
- }
-
- flp->config.mtu += sizeof(struct frhdr);
-
- /* off to the races! */
- if (!flp->configured)
- sdla_start(dev);
-
- flp->configured = 1;
- }
- else
- {
- /* no sense reading if the CPU isn't started */
- if (netif_running(dev))
- {
- size = sizeof(data);
- if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
- return -EIO;
- }
- else
- if (flp->configured)
- memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
- else
- memset(&data.config, 0, sizeof(struct frad_conf));
-
- memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
- data.config.flags &= FRAD_VALID_FLAGS;
- data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? sizeof(struct frhdr) : data.config.mtu;
- return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
- }
-
- return 0;
-}
-
-static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
-{
- struct sdla_mem mem;
- char *temp;
-
- if(copy_from_user(&mem, info, sizeof(mem)))
- return -EFAULT;
-
- if (read)
- {
- temp = kzalloc(mem.len, GFP_KERNEL);
- if (!temp)
- return -ENOMEM;
- sdla_read(dev, mem.addr, temp, mem.len);
- if(copy_to_user(mem.data, temp, mem.len))
- {
- kfree(temp);
- return -EFAULT;
- }
- kfree(temp);
- }
- else
- {
- temp = memdup_user(mem.data, mem.len);
- if (IS_ERR(temp))
- return PTR_ERR(temp);
- sdla_write(dev, mem.addr, temp, mem.len);
- kfree(temp);
- }
- return 0;
-}
-
-static int sdla_reconfig(struct net_device *dev)
-{
- struct frad_local *flp;
- struct conf_data data;
- int i, len;
-
- flp = netdev_priv(dev);
-
- len = 0;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- data.dlci[len++] = flp->dlci[i];
- len *= 2;
-
- memcpy(&data, &flp->config, sizeof(struct frad_conf));
- len += sizeof(struct frad_conf);
-
- sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
- sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
- sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
-
- return 0;
-}
-
-static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct frad_local *flp;
-
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- flp = netdev_priv(dev);
-
- if (!flp->initialized)
- return -EINVAL;
-
- switch (cmd)
- {
- case FRAD_GET_CONF:
- case FRAD_SET_CONF:
- return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF);
-
- case SDLA_IDENTIFY:
- ifr->ifr_flags = flp->type;
- break;
-
- case SDLA_CPUSPEED:
- return sdla_cpuspeed(dev, ifr);
-
-/* ==========================================================
-NOTE: This is rather a useless action right now, as the
- current driver does not support protocols other than
- FR. However, Sangoma has modules for a number of
- other protocols in the works.
-============================================================*/
- case SDLA_PROTOCOL:
- if (flp->configured)
- return -EALREADY;
-
- switch (ifr->ifr_flags)
- {
- case ARPHRD_FRAD:
- dev->type = ifr->ifr_flags;
- break;
- default:
- return -ENOPROTOOPT;
- }
- break;
-
- case SDLA_CLEARMEM:
- sdla_clear(dev);
- break;
-
- case SDLA_WRITEMEM:
- case SDLA_READMEM:
- if(!capable(CAP_SYS_RAWIO))
- return -EPERM;
- return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM);
-
- case SDLA_START:
- sdla_start(dev);
- break;
-
- case SDLA_STOP:
- sdla_stop(dev);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int sdla_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (netif_running(dev))
- return -EBUSY;
-
- /* for now, you can't change the MTU! */
- return -EOPNOTSUPP;
-}
-
-static int sdla_set_config(struct net_device *dev, struct ifmap *map)
-{
- struct frad_local *flp;
- int i;
- char byte;
- unsigned base;
- int err = -EINVAL;
-
- flp = netdev_priv(dev);
-
- if (flp->initialized)
- return -EINVAL;
-
- for(i=0; i < ARRAY_SIZE(valid_port); i++)
- if (valid_port[i] == map->base_addr)
- break;
-
- if (i == ARRAY_SIZE(valid_port))
- return -EINVAL;
-
- if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- pr_warn("io-port 0x%04lx in use\n", dev->base_addr);
- return -EINVAL;
- }
- base = map->base_addr;
-
- /* test for card types, S502A, S502E, S507, S508 */
- /* these tests shut down the card completely, so clear the state */
- flp->type = SDLA_UNKNOWN;
- flp->state = 0;
-
- for(i=1;i<SDLA_IO_EXTENTS;i++)
- if (inb(base + i) != 0xFF)
- break;
-
- if (i == SDLA_IO_EXTENTS) {
- outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) {
- outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S502E;
- goto got_type;
- }
- }
- }
-
- for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++)
- if (inb(base + i) != byte)
- break;
-
- if (i == SDLA_IO_EXTENTS) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) {
- outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S507;
- goto got_type;
- }
- }
- }
-
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) {
- outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S508;
- goto got_type;
- }
- }
-
- outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL);
- if (inb(base + SDLA_S502_STS) == 0x40) {
- outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
- if (inb(base + SDLA_S502_STS) == 0x40) {
- outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL);
- if (inb(base + SDLA_S502_STS) == 0x44) {
- outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S502A;
- goto got_type;
- }
- }
- }
-
- netdev_notice(dev, "Unknown card type\n");
- err = -ENODEV;
- goto fail;
-
-got_type:
- switch(base) {
- case 0x270:
- case 0x280:
- case 0x380:
- case 0x390:
- if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
- goto fail;
- }
-
- switch (map->irq) {
- case 2:
- if (flp->type != SDLA_S502E)
- goto fail;
- break;
-
- case 10:
- case 11:
- case 12:
- case 15:
- case 4:
- if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
- goto fail;
- break;
- case 3:
- case 5:
- case 7:
- if (flp->type == SDLA_S502A)
- goto fail;
- break;
-
- default:
- goto fail;
- }
-
- err = -EAGAIN;
- if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev))
- goto fail;
-
- if (flp->type == SDLA_S507) {
- switch(dev->irq) {
- case 3:
- flp->state = SDLA_S507_IRQ3;
- break;
- case 4:
- flp->state = SDLA_S507_IRQ4;
- break;
- case 5:
- flp->state = SDLA_S507_IRQ5;
- break;
- case 7:
- flp->state = SDLA_S507_IRQ7;
- break;
- case 10:
- flp->state = SDLA_S507_IRQ10;
- break;
- case 11:
- flp->state = SDLA_S507_IRQ11;
- break;
- case 12:
- flp->state = SDLA_S507_IRQ12;
- break;
- case 15:
- flp->state = SDLA_S507_IRQ15;
- break;
- }
- }
-
- for(i=0; i < ARRAY_SIZE(valid_mem); i++)
- if (valid_mem[i] == map->mem_start)
- break;
-
- err = -EINVAL;
- if (i == ARRAY_SIZE(valid_mem))
- goto fail2;
-
- if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E)
- goto fail2;
-
- if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B)
- goto fail2;
-
- if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D)
- goto fail2;
-
- byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0;
- byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 1 : 0));
- switch(flp->type) {
- case SDLA_S502A:
- case SDLA_S502E:
- switch (map->mem_start >> 16) {
- case 0x0A:
- byte |= SDLA_S502_SEG_A;
- break;
- case 0x0C:
- byte |= SDLA_S502_SEG_C;
- break;
- case 0x0D:
- byte |= SDLA_S502_SEG_D;
- break;
- case 0x0E:
- byte |= SDLA_S502_SEG_E;
- break;
- }
- break;
- case SDLA_S507:
- switch (map->mem_start >> 16) {
- case 0x0A:
- byte |= SDLA_S507_SEG_A;
- break;
- case 0x0B:
- byte |= SDLA_S507_SEG_B;
- break;
- case 0x0C:
- byte |= SDLA_S507_SEG_C;
- break;
- case 0x0E:
- byte |= SDLA_S507_SEG_E;
- break;
- }
- break;
- case SDLA_S508:
- switch (map->mem_start >> 16) {
- case 0x0A:
- byte |= SDLA_S508_SEG_A;
- break;
- case 0x0C:
- byte |= SDLA_S508_SEG_C;
- break;
- case 0x0D:
- byte |= SDLA_S508_SEG_D;
- break;
- case 0x0E:
- byte |= SDLA_S508_SEG_E;
- break;
- }
- break;
- }
-
- /* set the memory bits, and enable access */
- outb(byte, base + SDLA_REG_PC_WINDOW);
-
- switch(flp->type)
- {
- case SDLA_S502E:
- flp->state = SDLA_S502E_ENABLE;
- break;
- case SDLA_S507:
- flp->state |= SDLA_MEMEN;
- break;
- case SDLA_S508:
- flp->state = SDLA_MEMEN;
- break;
- }
- outb(flp->state, base + SDLA_REG_CONTROL);
-
- dev->irq = map->irq;
- dev->base_addr = base;
- dev->mem_start = map->mem_start;
- dev->mem_end = dev->mem_start + 0x2000;
- flp->initialized = 1;
- return 0;
-
-fail2:
- free_irq(map->irq, dev);
-fail:
- release_region(base, SDLA_IO_EXTENTS);
- return err;
-}
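-
-/*
- * Worked example of the window byte computed above (illustrative):
- * for an S508 mapped at 0xD2000, (map->mem_start & 0xF000) >> 13
- * gives 1, and the 0x0D segment selects SDLA_S508_SEG_D, so the
- * value written to SDLA_REG_PC_WINDOW is SDLA_S508_SEG_D | 1.
- */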
-
-static const struct net_device_ops sdla_netdev_ops = {
- .ndo_open = sdla_open,
- .ndo_stop = sdla_close,
- .ndo_do_ioctl = sdla_ioctl,
- .ndo_set_config = sdla_set_config,
- .ndo_start_xmit = sdla_transmit,
- .ndo_change_mtu = sdla_change_mtu,
-};
-
-static void setup_sdla(struct net_device *dev)
-{
- struct frad_local *flp = netdev_priv(dev);
-
- netdev_boot_setup_check(dev);
-
- dev->netdev_ops = &sdla_netdev_ops;
- dev->flags = 0;
- dev->type = 0xFFFF;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->mtu = SDLA_MAX_MTU;
-
- flp->activate = sdla_activate;
- flp->deactivate = sdla_deactivate;
- flp->assoc = sdla_assoc;
- flp->deassoc = sdla_deassoc;
- flp->dlci_conf = sdla_dlci_conf;
- flp->dev = dev;
-
- timer_setup(&flp->timer, sdla_poll, 0);
- flp->timer.expires = 1;
-}
-
-static struct net_device *sdla;
-
-static int __init init_sdla(void)
-{
- int err;
-
- printk("%s.\n", version);
-
- sdla = alloc_netdev(sizeof(struct frad_local), "sdla0",
- NET_NAME_UNKNOWN, setup_sdla);
- if (!sdla)
- return -ENOMEM;
-
- err = register_netdev(sdla);
- if (err)
- free_netdev(sdla);
-
- return err;
-}
-
-static void __exit exit_sdla(void)
-{
- struct frad_local *flp = netdev_priv(sdla);
-
- unregister_netdev(sdla);
- if (flp->initialized) {
- free_irq(sdla->irq, sdla);
- release_region(sdla->base_addr, SDLA_IO_EXTENTS);
- }
- del_timer_sync(&flp->timer);
- free_netdev(sdla);
-}
-
-MODULE_LICENSE("GPL");
-
-module_init(init_sdla);
-module_exit(exit_sdla);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
deleted file mode 100644
index 54b1a5aee82d..000000000000
--- a/drivers/net/wan/x25_asy.c
+++ /dev/null
@@ -1,836 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Things to sort out:
- *
- * o tbusy handling
- * o allow users to set the parameters
- * o sync/async switching ?
- *
- * Note: This does _not_ implement CCITT X.25 asynchronous framing
- * recommendations. Its primarily for testing purposes. If you wanted
- * to do CCITT then in theory all you need is to nick the HDLC async
- * checksum routines from ppp.c
- * Changes:
- *
- * 2000-10-29 Henner Eisen lapb_data_indication() return status.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/lapb.h>
-#include <linux/init.h>
-#include <linux/rtnetlink.h>
-#include <linux/slab.h>
-#include <net/x25device.h>
-#include "x25_asy.h"
-
-static struct net_device **x25_asy_devs;
-static int x25_asy_maxdev = SL_NRUNIT;
-
-module_param(x25_asy_maxdev, int, 0);
-MODULE_LICENSE("GPL");
-
-static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
-static void x25_asy_unesc(struct x25_asy *sl, unsigned char c);
-static void x25_asy_setup(struct net_device *dev);
-
-/* Find a free X.25 channel, and link in this `tty' line. */
-static struct x25_asy *x25_asy_alloc(void)
-{
- struct net_device *dev = NULL;
- struct x25_asy *sl;
- int i;
-
- if (x25_asy_devs == NULL)
- return NULL; /* Master array missing! */
-
- for (i = 0; i < x25_asy_maxdev; i++) {
- dev = x25_asy_devs[i];
-
- /* Not allocated? */
- if (dev == NULL)
- break;
-
- sl = netdev_priv(dev);
- /* Not in use? */
- if (!test_and_set_bit(SLF_INUSE, &sl->flags))
- return sl;
- }
-
-
- /* Sorry, too many, all slots in use */
- if (i >= x25_asy_maxdev)
- return NULL;
-
- /* If no channels are available, allocate one */
- if (!dev) {
- char name[IFNAMSIZ];
- sprintf(name, "x25asy%d", i);
-
- dev = alloc_netdev(sizeof(struct x25_asy), name,
- NET_NAME_UNKNOWN, x25_asy_setup);
- if (!dev)
- return NULL;
-
- /* Initialize channel control data */
- sl = netdev_priv(dev);
- dev->base_addr = i;
-
- /* register device so that it can be ifconfig'ed */
- if (register_netdev(dev) == 0) {
- /* (Re-)Set the INUSE bit. Very Important! */
- set_bit(SLF_INUSE, &sl->flags);
- x25_asy_devs[i] = dev;
- return sl;
- } else {
- pr_warn("%s(): register_netdev() failure\n", __func__);
- free_netdev(dev);
- }
- }
- return NULL;
-}
-
-
-/* Free an X.25 channel. */
-static void x25_asy_free(struct x25_asy *sl)
-{
- /* Free all X.25 frame buffers. */
- kfree(sl->rbuff);
- sl->rbuff = NULL;
- kfree(sl->xbuff);
- sl->xbuff = NULL;
-
- if (!test_and_clear_bit(SLF_INUSE, &sl->flags))
- netdev_err(sl->dev, "x25_asy_free for already free unit\n");
-}
-
-static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
-{
- struct x25_asy *sl = netdev_priv(dev);
- unsigned char *xbuff, *rbuff;
- int len;
-
- len = 2 * newmtu;
- xbuff = kmalloc(len + 4, GFP_ATOMIC);
- rbuff = kmalloc(len + 4, GFP_ATOMIC);
-
- if (xbuff == NULL || rbuff == NULL) {
- kfree(xbuff);
- kfree(rbuff);
- return -ENOMEM;
- }
-
- spin_lock_bh(&sl->lock);
- xbuff = xchg(&sl->xbuff, xbuff);
- if (sl->xleft) {
- if (sl->xleft <= len) {
- memcpy(sl->xbuff, sl->xhead, sl->xleft);
- } else {
- sl->xleft = 0;
- dev->stats.tx_dropped++;
- }
- }
- sl->xhead = sl->xbuff;
-
- rbuff = xchg(&sl->rbuff, rbuff);
- if (sl->rcount) {
- if (sl->rcount <= len) {
- memcpy(sl->rbuff, rbuff, sl->rcount);
- } else {
- sl->rcount = 0;
- dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
- }
-
- dev->mtu = newmtu;
- sl->buffsize = len;
-
- spin_unlock_bh(&sl->lock);
-
- kfree(xbuff);
- kfree(rbuff);
- return 0;
-}
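-
-/*
- * Note the pattern above: the replacement buffers are allocated
- * before taking the lock, swapped in under sl->lock with xchg() so
- * the rest of the driver never observes a mismatched xbuff/rbuff
- * pair, and the old buffers are freed only after the lock is
- * dropped.
- */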
-
-
-/* Set the "sending" flag. This must be atomic, hence the ASM. */
-
-static inline void x25_asy_lock(struct x25_asy *sl)
-{
- netif_stop_queue(sl->dev);
-}
-
-
-/* Clear the "sending" flag. This must be atomic, hence the ASM. */
-
-static inline void x25_asy_unlock(struct x25_asy *sl)
-{
- netif_wake_queue(sl->dev);
-}
-
-/* Send an LAPB frame to the LAPB module to process. */
-
-static void x25_asy_bump(struct x25_asy *sl)
-{
- struct net_device *dev = sl->dev;
- struct sk_buff *skb;
- int count;
- int err;
-
- count = sl->rcount;
- dev->stats.rx_bytes += count;
-
- skb = dev_alloc_skb(count);
- if (skb == NULL) {
- netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
- dev->stats.rx_dropped++;
- return;
- }
- skb_put_data(skb, sl->rbuff, count);
- err = lapb_data_received(sl->dev, skb);
- if (err != LAPB_OK) {
- kfree_skb(skb);
- printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
- } else {
- dev->stats.rx_packets++;
- }
-}
-
-/* Encapsulate one IP datagram and stuff into a TTY queue. */
-static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
-{
- unsigned char *p;
- int actual, count, mtu = sl->dev->mtu;
-
- if (len > mtu) {
- /* Sigh, shouldn't occur BUT ... */
- len = mtu;
- printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n",
- sl->dev->name);
- sl->dev->stats.tx_dropped++;
- x25_asy_unlock(sl);
- return;
- }
-
- p = icp;
- count = x25_asy_esc(p, sl->xbuff, len);
-
- /* The order of the next two lines is *very* important.
- * When we are sending a small amount of data,
- * the transfer may complete inside the driver's write()
- * routine, because it runs with interrupts enabled.
- * In that case we would *never* get the WRITE_WAKEUP event
- * unless we requested it before the write operation.
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
- sl->xleft = count - actual;
- sl->xhead = sl->xbuff + actual;
-}
-
-/*
- * Called by the driver when there's room for more data. If we have
- * more packets to send, we send them here.
- */
-static void x25_asy_write_wakeup(struct tty_struct *tty)
-{
- int actual;
- struct x25_asy *sl = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
- return;
-
- if (sl->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet */
- sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- x25_asy_unlock(sl);
- return;
- }
-
- actual = tty->ops->write(tty, sl->xhead, sl->xleft);
- sl->xleft -= actual;
- sl->xhead += actual;
-}
-
-static void x25_asy_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock(&sl->lock);
- if (netif_queue_stopped(dev)) {
-		/* Maybe we should check for a transmitter timeout here?
-		 * 14 Oct 1994 Dmitry Gorodchanin.
-		 */
- netdev_warn(dev, "transmit timed out, %s?\n",
- (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
- "bad line quality" : "driver error");
- sl->xleft = 0;
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- x25_asy_unlock(sl);
- }
- spin_unlock(&sl->lock);
-}
-
-/* Encapsulate an IP datagram and kick it into a TTY queue. */
-
-static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
- int err;
-
- if (!netif_running(sl->dev)) {
- netdev_err(dev, "xmit call when iface is down\n");
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* There should be a pseudo header of 1 byte added by upper layers.
- * Check to make sure it is there before reading it.
- */
- if (skb->len < 1) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- switch (skb->data[0]) {
- case X25_IFACE_DATA:
- break;
- case X25_IFACE_CONNECT: /* Connection request .. do nothing */
- err = lapb_connect_request(dev);
- if (err != LAPB_OK)
- netdev_err(dev, "lapb_connect_request error: %d\n",
- err);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
- err = lapb_disconnect_request(dev);
- if (err != LAPB_OK)
- netdev_err(dev, "lapb_disconnect_request error: %d\n",
- err);
- fallthrough;
- default:
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- skb_pull(skb, 1); /* Remove control byte */
- /*
-	 * If we are busy already - too bad. We ought to be able
-	 * to queue things at this point, to allow for a little
-	 * frame buffering. Oh well...
-	 * -----------------------------------------------------
-	 * I hate queues in the X.25 driver. Maybe they're efficient,
-	 * but for me latency is more important. ;)
-	 * So, no queues!
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
-
- err = lapb_data_request(dev, skb);
- if (err != LAPB_OK) {
- netdev_err(dev, "lapb_data_request error: %d\n", err);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- return NETDEV_TX_OK;
-}
-
-
-/*
- * LAPB interface boilerplate
- */
-
-/*
- * Called when I-frame data arrives. We prepend a one-byte pseudo
- * header and pass the frame on to the upper layers.
- */
-
-static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
-{
- if (skb_cow(skb, 1)) {
- kfree_skb(skb);
- return NET_RX_DROP;
- }
- skb_push(skb, 1);
- skb->data[0] = X25_IFACE_DATA;
-
- skb->protocol = x25_type_trans(skb, dev);
-
- return netif_rx(skb);
-}
-
-/*
- * Data has emerged from the LAPB protocol machine. We don't handle
- * busy cases too well. It's tricky to see how to do this nicely -
- * perhaps lapb should allow us to bounce this?
- */
-
-static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock(&sl->lock);
- if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
- spin_unlock(&sl->lock);
- netdev_err(dev, "tbusy drop\n");
- kfree_skb(skb);
- return;
- }
- /* We were not busy, so we are now... :-) */
- if (skb != NULL) {
- x25_asy_lock(sl);
- dev->stats.tx_bytes += skb->len;
- x25_asy_encaps(sl, skb->data, skb->len);
- dev_kfree_skb(skb);
- }
- spin_unlock(&sl->lock);
-}
-
-/*
- * LAPB connection establish/down information.
- */
-
-static void x25_asy_connected(struct net_device *dev, int reason)
-{
- struct x25_asy *sl = netdev_priv(dev);
- struct sk_buff *skb;
- unsigned char *ptr;
-
- skb = dev_alloc_skb(1);
- if (skb == NULL) {
- netdev_err(dev, "out of memory\n");
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = X25_IFACE_CONNECT;
-
- skb->protocol = x25_type_trans(skb, sl->dev);
- netif_rx(skb);
-}
-
-static void x25_asy_disconnected(struct net_device *dev, int reason)
-{
- struct x25_asy *sl = netdev_priv(dev);
- struct sk_buff *skb;
- unsigned char *ptr;
-
- skb = dev_alloc_skb(1);
- if (skb == NULL) {
- netdev_err(dev, "out of memory\n");
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = X25_IFACE_DISCONNECT;
-
- skb->protocol = x25_type_trans(skb, sl->dev);
- netif_rx(skb);
-}
-
-static const struct lapb_register_struct x25_asy_callbacks = {
- .connect_confirmation = x25_asy_connected,
- .connect_indication = x25_asy_connected,
- .disconnect_confirmation = x25_asy_disconnected,
- .disconnect_indication = x25_asy_disconnected,
- .data_indication = x25_asy_data_indication,
- .data_transmit = x25_asy_data_transmit,
-};
-
-
-/* Open the low-level part of the X.25 channel. Easy! */
-static int x25_asy_open(struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
- unsigned long len;
-
- if (sl->tty == NULL)
- return -ENODEV;
-
- /*
- * Allocate the X.25 frame buffers:
- *
- * rbuff Receive buffer.
- * xbuff Transmit buffer.
- */
-
- len = dev->mtu * 2;
-
- sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
- if (sl->rbuff == NULL)
- goto norbuff;
- sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
- if (sl->xbuff == NULL)
- goto noxbuff;
-
- sl->buffsize = len;
- sl->rcount = 0;
- sl->xleft = 0;
- sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */
-
- return 0;
-
- /* Cleanup */
- kfree(sl->xbuff);
- sl->xbuff = NULL;
-noxbuff:
- kfree(sl->rbuff);
- sl->rbuff = NULL;
-norbuff:
- return -ENOMEM;
-}
-
-
-/* Close the low-level part of the X.25 channel. Easy! */
-static int x25_asy_close(struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock(&sl->lock);
- if (sl->tty)
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
-
- sl->rcount = 0;
- sl->xleft = 0;
- spin_unlock(&sl->lock);
- return 0;
-}
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of X.25 data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing.
- */
-
-static void x25_asy_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
-{
- struct x25_asy *sl = tty->disc_data;
-
- if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
- return;
-
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp && *fp++) {
- if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->dev->stats.rx_errors++;
- cp++;
- continue;
- }
- x25_asy_unesc(sl, *cp++);
- }
-}
-
-/*
- * Open the high-level part of the X.25 channel.
- * This function is called by the TTY module when the
- * X.25 line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free X.25 channel...
- */
-
-static int x25_asy_open_tty(struct tty_struct *tty)
-{
- struct x25_asy *sl;
- int err;
-
- if (tty->ops->write == NULL)
- return -EOPNOTSUPP;
-
- /* OK. Find a free X.25 channel to use. */
- sl = x25_asy_alloc();
- if (sl == NULL)
- return -ENFILE;
-
- sl->tty = tty;
- tty->disc_data = sl;
- tty->receive_room = 65536;
- tty_driver_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- /* Restore default settings */
- sl->dev->type = ARPHRD_X25;
-
- /* Perform the low-level X.25 async init */
- err = x25_asy_open(sl->dev);
- if (err) {
- x25_asy_free(sl);
- return err;
- }
- /* Done. We have linked the TTY line to a channel. */
- return 0;
-}
-
-
-/*
- * Close down an X.25 channel.
- * This means flushing out any pending queues, and then restoring the
- * TTY line discipline to what it was before it got hooked to X.25
- * (which usually is TTY again).
- */
-static void x25_asy_close_tty(struct tty_struct *tty)
-{
- struct x25_asy *sl = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC)
- return;
-
- rtnl_lock();
- if (sl->dev->flags & IFF_UP)
- dev_close(sl->dev);
- rtnl_unlock();
-
- tty->disc_data = NULL;
- sl->tty = NULL;
- x25_asy_free(sl);
-}
-
- /************************************************************************
- * STANDARD X.25 ENCAPSULATION *
- ************************************************************************/
-
-static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
-{
- unsigned char *ptr = d;
- unsigned char c;
-
- /*
- * Send an initial END character to flush out any
- * data that may have accumulated in the receiver
- * due to line noise.
- */
-
-	*ptr++ = X25_END;	/* Send 01111110 bit seq */
-
- /*
- * For each byte in the packet, send the appropriate
- * character sequence, according to the X.25 protocol.
- */
-
- while (len-- > 0) {
- switch (c = *s++) {
- case X25_END:
- *ptr++ = X25_ESC;
- *ptr++ = X25_ESCAPE(X25_END);
- break;
- case X25_ESC:
- *ptr++ = X25_ESC;
- *ptr++ = X25_ESCAPE(X25_ESC);
- break;
- default:
- *ptr++ = c;
- break;
- }
- }
- *ptr++ = X25_END;
- return ptr - d;
-}
-
-static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
-{
- switch (s) {
- case X25_END:
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount >= 2)
- x25_asy_bump(sl);
- clear_bit(SLF_ESCAPE, &sl->flags);
- sl->rcount = 0;
- return;
- case X25_ESC:
- set_bit(SLF_ESCAPE, &sl->flags);
- return;
- case X25_ESCAPE(X25_ESC):
- case X25_ESCAPE(X25_END):
- if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
- s = X25_UNESCAPE(s);
- break;
- }
- if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < sl->buffsize) {
- sl->rbuff[sl->rcount++] = s;
- return;
- }
- sl->dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
-}
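The stuffing transform is its own inverse: X25_ESCAPE() and X25_UNESCAPE() are the same XOR with 0x20 (see the header deleted below), so the decoder above simply applies it again. A host-side sanity check, assuming the header's constants:

	#include <assert.h>

	#define X25_END		0x7E
	#define X25_ESC		0x7D
	#define X25_ESCAPE(x)	((x) ^ 0x20)
	#define X25_UNESCAPE(x)	((x) ^ 0x20)

	int main(void)
	{
		assert(X25_ESCAPE(X25_END) == 0x5E);	/* END never appears in-frame */
		assert(X25_ESCAPE(X25_ESC) == 0x5D);	/* neither does ESC itself */
		assert(X25_UNESCAPE(X25_ESCAPE(0x41)) == 0x41);	/* round trip */
		return 0;
	}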
-
-
-/* Perform I/O control on an active X.25 channel. */
-static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct x25_asy *sl = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- if (copy_to_user((void __user *)arg, sl->dev->name,
- strlen(sl->dev->name) + 1))
- return -EFAULT;
- return 0;
- case SIOCSIFHWADDR:
- return -EINVAL;
- default:
- return tty_mode_ioctl(tty, file, cmd, arg);
- }
-}
-
-static int x25_asy_open_dev(struct net_device *dev)
-{
- int err;
- struct x25_asy *sl = netdev_priv(dev);
- if (sl->tty == NULL)
- return -ENODEV;
-
- err = lapb_register(dev, &x25_asy_callbacks);
- if (err != LAPB_OK)
- return -ENOMEM;
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int x25_asy_close_dev(struct net_device *dev)
-{
- int err;
-
- netif_stop_queue(dev);
-
- err = lapb_unregister(dev);
- if (err != LAPB_OK)
- pr_err("%s: lapb_unregister error: %d\n",
- __func__, err);
-
- x25_asy_close(dev);
-
- return 0;
-}
-
-static const struct net_device_ops x25_asy_netdev_ops = {
- .ndo_open = x25_asy_open_dev,
- .ndo_stop = x25_asy_close_dev,
- .ndo_start_xmit = x25_asy_xmit,
- .ndo_tx_timeout = x25_asy_timeout,
- .ndo_change_mtu = x25_asy_change_mtu,
-};
-
-/* Initialise the X.25 driver. Called by the device init code */
-static void x25_asy_setup(struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- sl->magic = X25_ASY_MAGIC;
- sl->dev = dev;
- spin_lock_init(&sl->lock);
- set_bit(SLF_INUSE, &sl->flags);
-
- /*
- * Finish setting up the DEVICE info.
- */
-
- dev->mtu = SL_MTU;
- dev->min_mtu = 0;
- dev->max_mtu = 65534;
- dev->netdev_ops = &x25_asy_netdev_ops;
- dev->watchdog_timeo = HZ*20;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->type = ARPHRD_X25;
- dev->tx_queue_len = 10;
-
- /* When transmitting data:
- * first this driver removes a pseudo header of 1 byte,
- * then the lapb module prepends an LAPB header of at most 3 bytes.
- */
- dev->needed_headroom = 3 - 1;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
-}
-
-static struct tty_ldisc_ops x25_ldisc = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = "X.25",
- .open = x25_asy_open_tty,
- .close = x25_asy_close_tty,
- .ioctl = x25_asy_ioctl,
- .receive_buf = x25_asy_receive_buf,
- .write_wakeup = x25_asy_write_wakeup,
-};
-
-static int __init init_x25_asy(void)
-{
- if (x25_asy_maxdev < 4)
- x25_asy_maxdev = 4; /* Sanity */
-
- pr_info("X.25 async: version 0.00 ALPHA (dynamic channels, max=%d)\n",
- x25_asy_maxdev);
-
- x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
- GFP_KERNEL);
- if (!x25_asy_devs)
- return -ENOMEM;
-
- return tty_register_ldisc(N_X25, &x25_ldisc);
-}
-
-
-static void __exit exit_x25_asy(void)
-{
- struct net_device *dev;
- int i;
-
- for (i = 0; i < x25_asy_maxdev; i++) {
- dev = x25_asy_devs[i];
- if (dev) {
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock_bh(&sl->lock);
- if (sl->tty)
- tty_hangup(sl->tty);
-
- spin_unlock_bh(&sl->lock);
-			/*
-			 * VSV: if dev->start == 0, the device was
-			 * unregistered during the close procedure.
-			 */
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
-
- kfree(x25_asy_devs);
- tty_unregister_ldisc(N_X25);
-}
-
-module_init(init_x25_asy);
-module_exit(exit_x25_asy);
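The deleted driver is a textbook serial line discipline; distilled to its skeleton it looks like the sketch below (example_* names are illustrative, error handling omitted):

	static struct tty_ldisc_ops example_ldisc = {
		.owner		= THIS_MODULE,
		.magic		= TTY_LDISC_MAGIC,
		.name		= "example",
		.open		= example_open_tty,	/* bind the tty to a channel */
		.close		= example_close_tty,	/* unbind and free it */
		.receive_buf	= example_receive_buf,	/* bytes in from the UART */
		.write_wakeup	= example_write_wakeup,	/* room for more TX bytes */
	};

	static int __init example_init(void)
	{
		return tty_register_ldisc(N_X25, &example_ldisc);
	}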
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
deleted file mode 100644
index 87798287c9ca..000000000000
--- a/drivers/net/wan/x25_asy.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_X25_ASY_H
-#define _LINUX_X25_ASY_H
-
-/* X.25 asy configuration. */
-#define SL_NRUNIT 256 /* MAX number of X.25 channels;
- This can be overridden with
- insmod -ox25_asy_maxdev=nnn */
-#define SL_MTU 256
-
-/* X25 async protocol characters. */
-#define X25_END 0x7E /* indicates end of frame */
-#define X25_ESC 0x7D /* indicates byte stuffing */
-#define X25_ESCAPE(x) ((x)^0x20)
-#define X25_UNESCAPE(x) ((x)^0x20)
-
-
-struct x25_asy {
- int magic;
-
- /* Various fields. */
- spinlock_t lock;
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char *xbuff; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next byte to XMIT */
- int xleft; /* bytes left in XMIT queue */
- int buffsize; /* Max buffers sizes */
-
- unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ESCAPE 1 /* ESC received */
-#define SLF_ERROR 2 /* Parity, etc. error */
-};
-
-
-
-#define X25_ASY_MAGIC 0x5303
-
-int x25_asy_init(struct net_device *dev);
-
-#endif	/* _LINUX_X25_ASY_H */
diff --git a/drivers/net/wimax/Kconfig b/drivers/net/wimax/Kconfig
deleted file mode 100644
index 2249e3d77a76..000000000000
--- a/drivers/net/wimax/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# WiMAX LAN device drivers configuration
-#
-
-
-comment "Enable WiMAX (Networking options) to see the WiMAX drivers"
- depends on WIMAX = n
-
-if WIMAX
-
-menu "WiMAX Wireless Broadband devices"
-
-source "drivers/net/wimax/i2400m/Kconfig"
-
-endmenu
-
-endif
diff --git a/drivers/net/wimax/Makefile b/drivers/net/wimax/Makefile
deleted file mode 100644
index b4575bacf994..000000000000
--- a/drivers/net/wimax/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_WIMAX_I2400M) += i2400m/
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index c9f65e96ccb0..a3ed49cd95c3 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -215,7 +215,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = wg_open,
.ndo_stop = wg_stop,
.ndo_start_xmit = wg_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64
+ .ndo_get_stats64 = dev_get_tstats64
};
static void wg_destruct(struct net_device *dev)
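dev_get_tstats64() is the generic helper this series converges on: it folds the per-cpu pcpu_sw_netstats counters hanging off dev->tstats (wireguard populates dev->tstats elsewhere in its setup path). A minimal sketch of the full pattern, mirroring the quantenna conversion later in this patch (example_* names are illustrative):

	static int example_init(struct net_device *dev)
	{
		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		return dev->tstats ? 0 : -ENOMEM;
	}

	static void example_uninit(struct net_device *dev)
	{
		free_percpu(dev->tstats);
	}

	static const struct net_device_ops example_ops = {
		.ndo_init		= example_init,
		.ndo_uninit		= example_uninit,
		.ndo_get_stats64	= dev_get_tstats64,	/* reads dev->tstats */
	};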
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 170a64e67709..7add2002ff4c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -18,19 +18,6 @@ menuconfig WLAN
if WLAN
-config WIRELESS_WDS
- bool "mac80211-based legacy WDS support" if EXPERT
- help
- This option enables the deprecated WDS support, the newer
- mac80211-based 4-addr AP/client support supersedes it with
- a much better feature set (HT, VHT, ...)
-
- We plan to remove this option and code, so if you find
- that you have to enable it, please let us know on the
- linux-wireless@vger.kernel.org mailing list, so we can
- help you migrate to 4-addr AP/client (or, if it's really
- necessary, give up on our plan of removing it).
-
source "drivers/net/wireless/admtek/Kconfig"
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/atmel/Kconfig"
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e06b74a54a69..13b4f5f50f8a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -661,7 +661,6 @@ struct ath9k_vif_iter_data {
int naps; /* number of AP vifs */
int nmeshes; /* number of mesh vifs */
int nstations; /* number of station vifs */
- int nwds; /* number of WDS vifs */
int nadhocs; /* number of adhoc vifs */
int nocbs; /* number of OCB vifs */
int nbcnvifs; /* number of beaconing vifs */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 26ea51a72156..017a43bc400c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -735,10 +735,10 @@ static int read_file_misc(struct seq_file *file, void *data)
ath9k_calculate_iter_data(sc, ctx, &iter_data);
seq_printf(file,
- "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i",
+ "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i",
i++, (int)(ctx->assigned), iter_data.naps,
iter_data.nstations,
- iter_data.nmeshes, iter_data.nwds);
+ iter_data.nmeshes);
seq_printf(file, " ADHOC: %i OCB: %i TOTAL: %hi BEACON-VIF: %hi\n",
iter_data.nadhocs, iter_data.nocbs, sc->cur_chan->nvifs,
sc->nbcnvifs);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 690fe3a1b516..42a208787f5a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -832,12 +832,6 @@ static const struct ieee80211_iface_limit if_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) },
};
-#ifdef CONFIG_WIRELESS_WDS
-static const struct ieee80211_iface_limit wds_limits[] = {
- { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
-};
-#endif
-
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
static const struct ieee80211_iface_limit if_limits_multi[] = {
@@ -874,15 +868,6 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40),
#endif
},
-#ifdef CONFIG_WIRELESS_WDS
- {
- .limits = wds_limits,
- .n_limits = ARRAY_SIZE(wds_limits),
- .max_interfaces = 2048,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- },
-#endif
};
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
@@ -897,7 +882,6 @@ static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, QUEUE_CONTROL);
hw->queues = ATH9K_NUM_TX_QUEUES;
hw->offchannel_tx_hw_queue = hw->queues - 1;
- hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
hw->wiphy->iface_combinations = if_comb_multi;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
hw->wiphy->max_scan_ssids = 255;
@@ -953,9 +937,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT) |
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_OCB);
if (ath9k_is_chanctx_enabled())
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 8dbf68b94228..caebe3fd6869 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -973,9 +973,6 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
if (vif->bss_conf.enable_beacon)
ath9k_vif_iter_set_beacon(iter_data, vif);
break;
- case NL80211_IFTYPE_WDS:
- iter_data->nwds++;
- break;
default:
break;
}
@@ -1136,8 +1133,6 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ah->opmode = NL80211_IFTYPE_MESH_POINT;
else if (iter_data.nocbs)
ah->opmode = NL80211_IFTYPE_OCB;
- else if (iter_data.nwds)
- ah->opmode = NL80211_IFTYPE_AP;
else if (iter_data.nadhocs)
ah->opmode = NL80211_IFTYPE_ADHOC;
else
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index b2eeb9fd68d2..6cdbee5beb07 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -329,10 +329,6 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
/* iwlagn 802.11n STA Workaround */
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
break;
- case NL80211_IFTYPE_WDS:
- cam_mode |= AR9170_MAC_CAM_AP_WDS;
- rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
- break;
case NL80211_IFTYPE_STATION:
cam_mode |= AR9170_MAC_CAM_STA;
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index dbef9d8fc893..cca3b086aa70 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -646,7 +646,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if ((vif->type == NL80211_IFTYPE_STATION) ||
- (vif->type == NL80211_IFTYPE_WDS) ||
(vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_MESH_POINT))
break;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index f175dbaffc30..150a366e8f62 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -4961,12 +4961,11 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
struct b43_wldev *dev;
int err = -EOPNOTSUPP;
- /* TODO: allow WDS/AP devices to coexist */
+ /* TODO: allow AP devices to coexist */
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
@@ -5576,9 +5575,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index a27125b7922c..7692a2618c97 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3381,11 +3381,10 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
unsigned long flags;
int err = -EOPNOTSUPP;
- /* TODO: allow WDS/AP devices to coexist */
+ /* TODO: allow AP devices to coexist */
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
@@ -3805,9 +3804,6 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_ADHOC);
hw->queues = 1; /* FIXME: hardware has more queues */
hw->max_rates = 2;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index bf6dbeb61842..ad726bd100ec 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -126,28 +126,13 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
qtnf_packet_send_hi_pri(skb);
- qtnf_update_tx_stats(ndev, skb);
+ dev_sw_netstats_tx_add(ndev, 1, skb->len);
return NETDEV_TX_OK;
}
return qtnf_bus_data_tx(mac->bus, skb, mac->macid, vif->vifid);
}
-/* Netdev handler for getting stats.
- */
-static void qtnf_netdev_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
-
- netdev_stats_to_stats64(stats, &ndev->stats);
-
- if (!vif->stats64)
- return;
-
- dev_fetch_sw_netstats(stats, vif->stats64);
-}
-
/* Netdev handler for transmission timeout.
*/
static void qtnf_netdev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
@@ -211,13 +196,27 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
return 0;
}
+static int qtnf_netdev_alloc_pcpu_stats(struct net_device *dev)
+{
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+
+ return dev->tstats ? 0 : -ENOMEM;
+}
+
+static void qtnf_netdev_free_pcpu_stats(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
+ .ndo_init = qtnf_netdev_alloc_pcpu_stats,
+ .ndo_uninit = qtnf_netdev_free_pcpu_stats,
.ndo_open = qtnf_netdev_open,
.ndo_stop = qtnf_netdev_close,
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
- .ndo_get_stats64 = qtnf_netdev_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qtnf_netdev_set_mac_address,
.ndo_get_port_parent_id = qtnf_netdev_port_parent_id,
};
@@ -448,10 +447,6 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
qtnf_sta_list_init(&vif->sta_list);
INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri);
skb_queue_head_init(&vif->high_pri_tx_queue);
- vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!vif->stats64)
- pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
- macid, i);
}
qtnf_mac_init_primary_intf(mac);
@@ -531,7 +526,6 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
}
rtnl_unlock();
qtnf_sta_list_free(&vif->sta_list);
- free_percpu(vif->stats64);
}
if (mac->wiphy_registered)
@@ -924,46 +918,6 @@ void qtnf_wake_all_queues(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(qtnf_wake_all_queues);
-void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
- struct pcpu_sw_netstats *stats64;
-
- if (unlikely(!vif || !vif->stats64)) {
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
- return;
- }
-
- stats64 = this_cpu_ptr(vif->stats64);
-
- u64_stats_update_begin(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += skb->len;
- u64_stats_update_end(&stats64->syncp);
-}
-EXPORT_SYMBOL_GPL(qtnf_update_rx_stats);
-
-void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
- struct pcpu_sw_netstats *stats64;
-
- if (unlikely(!vif || !vif->stats64)) {
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- return;
- }
-
- stats64 = this_cpu_ptr(vif->stats64);
-
- u64_stats_update_begin(&stats64->syncp);
- stats64->tx_packets++;
- stats64->tx_bytes += skb->len;
- u64_stats_update_end(&stats64->syncp);
-}
-EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
-
struct dentry *qtnf_get_debugfs_dir(void)
{
return qtnf_debugfs_dir;
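The open-coded u64_stats sections removed above now live behind two core helpers; a minimal sketch of their use in a datapath (example_* names are illustrative):

	static void example_rx(struct net_device *ndev, struct sk_buff *skb)
	{
		dev_sw_netstats_rx_add(ndev, skb->len);		/* one packet, skb->len bytes */
		netif_receive_skb(skb);
	}

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		dev_sw_netstats_tx_add(ndev, 1, skb->len);	/* packet count, then bytes */
		/* ...hand the skb to the hardware... */
		return NETDEV_TX_OK;
	}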
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 269ce12cf8bf..b204a24074ab 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -70,8 +70,6 @@ struct qtnf_vif {
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
int generation;
-
- struct pcpu_sw_netstats __percpu *stats64;
};
struct qtnf_mac_info {
@@ -139,8 +137,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
void qtnf_wake_all_queues(struct net_device *ndev);
-void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb);
-void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb);
void qtnf_virtual_intf_cleanup(struct net_device *ndev);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 9a20c0f29078..0003df577cb3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -489,7 +489,7 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
PCI_DMA_TODEVICE);
if (skb->dev) {
- qtnf_update_tx_stats(skb->dev, skb);
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
if (unlikely(priv->tx_stopped)) {
qtnf_wake_all_queues(skb->dev);
priv->tx_stopped = 0;
@@ -756,7 +756,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
- qtnf_update_rx_stats(ndev, skb);
+ dev_sw_netstats_rx_add(ndev, skb->len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(napi, skb);
} else {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 4b87d3151017..24f1be8ddcef 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -418,7 +418,7 @@ static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
PCI_DMA_TODEVICE);
if (skb->dev) {
- qtnf_update_tx_stats(skb->dev, skb);
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
if (unlikely(priv->tx_stopped)) {
qtnf_wake_all_queues(skb->dev);
priv->tx_stopped = 0;
@@ -662,7 +662,7 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
- qtnf_update_rx_stats(ndev, skb);
+ dev_sw_netstats_rx_add(ndev, skb->len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
} else {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 0ee1813e8453..6bafdd991171 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -32,7 +32,6 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_WDS:
conf.sync = TSF_SYNC_AP_NONE;
break;
case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index b04f76551ca4..61a4f1ad31e2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -194,8 +194,7 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_WDS)
+ vif->type != NL80211_IFTYPE_MESH_POINT)
return;
/*
@@ -1437,9 +1436,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_AP);
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 2f68a31072ae..dea5babd30fe 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -408,8 +408,7 @@ static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_WDS)
+ vif->type != NL80211_IFTYPE_MESH_POINT)
return;
set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 75b5d545b49e..9fe77556858e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3379,7 +3379,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rndis_wlan_set_multicast_list,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3e9895bec15f..920cac4385bf 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2413,12 +2413,11 @@ static ssize_t store_rxbuf(struct device *dev,
const char *buf, size_t len)
{
char *endp;
- unsigned long target;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- target = simple_strtoul(buf, &endp, 0);
+ simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EBADMSG;
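store_rxbuf() no longer uses the parsed value, keeping simple_strtoul() purely as input validation: endp is advanced past any consumed digits, so endp == buf means nothing numeric was written. The idiom as a sketch:

	static ssize_t example_store(const char *buf, size_t len)
	{
		char *endp;

		simple_strtoul(buf, &endp, 0);	/* result intentionally unused */
		if (endp == buf)		/* no digits were consumed */
			return -EBADMSG;
		return len;			/* accept (and ignore) the value */
	}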
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index ba6c486d6465..f8e5d78d9078 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -136,7 +136,7 @@ static struct nci_ops s3fwrn5_nci_ops = {
};
int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
- const struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload)
+ const struct s3fwrn5_phy_ops *phy_ops)
{
struct s3fwrn5_info *info;
int ret;
@@ -148,7 +148,6 @@ int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
info->phy_id = phy_id;
info->pdev = pdev;
info->phy_ops = phy_ops;
- info->max_payload = max_payload;
mutex_init(&info->mutex);
s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index ec930ee2c847..4cde6dd5c019 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -266,7 +266,7 @@ static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info)
}
/*
- * Firmware header stucture:
+ * Firmware header structure:
*
* 0x00 - 0x0B : Date and time string (w/o NUL termination)
* 0x10 - 0x13 : Firmware version
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index dc995286be84..0ffa389066a0 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -19,7 +19,6 @@
#define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
-#define S3FWRN5_I2C_MAX_PAYLOAD 32
#define S3FWRN5_EN_WAIT_TIME 150
struct s3fwrn5_i2c_phy {
@@ -248,8 +247,7 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
- S3FWRN5_I2C_MAX_PAYLOAD);
+ ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops);
if (ret < 0)
return ret;
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
index ede68bb5eeae..bb8f936d13a2 100644
--- a/drivers/nfc/s3fwrn5/s3fwrn5.h
+++ b/drivers/nfc/s3fwrn5/s3fwrn5.h
@@ -34,7 +34,6 @@ struct s3fwrn5_info {
struct device *pdev;
const struct s3fwrn5_phy_ops *phy_ops;
- unsigned int max_payload;
struct s3fwrn5_fw_info fw_info;
@@ -45,7 +44,7 @@ static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
enum s3fwrn5_mode mode)
{
if (!info->phy_ops->set_mode)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
info->phy_ops->set_mode(info->phy_id, mode);
@@ -55,7 +54,7 @@ static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
{
if (!info->phy_ops->get_mode)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return info->phy_ops->get_mode(info->phy_id);
}
@@ -63,7 +62,7 @@ static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
{
if (!info->phy_ops->set_wake)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
info->phy_ops->set_wake(info->phy_id, wake);
@@ -73,13 +72,13 @@ static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
{
if (!info->phy_ops->write)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return info->phy_ops->write(info->phy_id, skb);
}
int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
- const struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload);
+ const struct s3fwrn5_phy_ops *phy_ops);
void s3fwrn5_remove(struct nci_dev *ndev);
int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
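The errno switch above matters because ENOTSUPP (524) is kernel-internal and reaches userspace as "Unknown error 524", while EOPNOTSUPP prints as "Operation not supported". The guard pattern for optional callbacks, restated as a sketch:

	static inline int example_set_wake(struct s3fwrn5_info *info, bool wake)
	{
		if (!info->phy_ops->set_wake)
			return -EOPNOTSUPP;	/* optional callback not provided */

		info->phy_ops->set_wake(info->phy_id, wake);
		return 0;
	}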
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index 179f6c472e50..c1c959f7e52b 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -21,6 +21,7 @@ MODULE_DESCRIPTION("Driver for IDT 82p33xxx clock devices");
MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FW_FILENAME);
/* Module Parameters */
static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
@@ -77,11 +78,10 @@ static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
}
}
-static int idt82p33_xfer(struct idt82p33 *idt82p33,
- unsigned char regaddr,
- unsigned char *buf,
- unsigned int count,
- int write)
+static int idt82p33_xfer_read(struct idt82p33 *idt82p33,
+ unsigned char regaddr,
+ unsigned char *buf,
+ unsigned int count)
{
struct i2c_client *client = idt82p33->client;
struct i2c_msg msg[2];
@@ -93,7 +93,7 @@ static int idt82p33_xfer(struct idt82p33 *idt82p33,
msg[0].buf = &regaddr;
msg[1].addr = client->addr;
- msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].flags = I2C_M_RD;
msg[1].len = count;
msg[1].buf = buf;
@@ -109,6 +109,31 @@ static int idt82p33_xfer(struct idt82p33 *idt82p33,
return 0;
}
+static int idt82p33_xfer_write(struct idt82p33 *idt82p33,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ struct i2c_client *client = idt82p33->client;
+ /* we add 1 byte for device register */
+ u8 msg[IDT82P33_MAX_WRITE_COUNT + 1];
+ int err;
+
+ if (count > IDT82P33_MAX_WRITE_COUNT)
+ return -EINVAL;
+
+ msg[0] = regaddr;
+ memcpy(&msg[1], buf, count);
+
+ err = i2c_master_send(client, msg, count + 1);
+ if (err < 0) {
+ dev_err(&client->dev, "i2c_master_send returned %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
{
int err;
@@ -116,7 +141,7 @@ static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
if (idt82p33->page_offset == val)
return 0;
- err = idt82p33_xfer(idt82p33, PAGE_ADDR, &val, sizeof(val), 1);
+ err = idt82p33_xfer_write(idt82p33, PAGE_ADDR, &val, sizeof(val));
if (err)
dev_err(&idt82p33->client->dev,
"failed to set page offset %d\n", val);
@@ -137,11 +162,12 @@ static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
err = idt82p33_page_offset(idt82p33, page);
if (err)
- goto out;
+ return err;
- err = idt82p33_xfer(idt82p33, offset, buf, count, write);
-out:
- return err;
+ if (write)
+ return idt82p33_xfer_write(idt82p33, offset, buf, count);
+
+ return idt82p33_xfer_read(idt82p33, offset, buf, count);
}
static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
@@ -294,7 +320,6 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
unsigned char buf[5] = {0};
- int neg_adj = 0;
int err, i;
s64 fcw;
@@ -314,16 +339,9 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
* FCW = -------------
* 168 * 2^4
*/
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
fcw = scaled_ppm * 244140625ULL;
- fcw = div_u64(fcw, 2688);
-
- if (neg_adj)
- fcw = -fcw;
+ fcw = div_s64(fcw, 2688);
for (i = 0; i < 5; i++) {
buf[i] = fcw & 0xff;
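The removed neg_adj juggling is safe to drop because div_s64() divides signed values, truncating toward zero for either sign - exactly what the old negate / div_u64() / negate sequence achieved. As a sketch:

	static s64 example_fcw(long scaled_ppm)
	{
		s64 fcw = (s64)scaled_ppm * 244140625LL;	/* may be negative */

		return div_s64(fcw, 2688);	/* signed divide, no sign fixups */
	}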
@@ -448,8 +466,11 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
+ }
err = idt82p33_measure_one_byte_write_overhead(channel,
&one_byte_write_ns);
@@ -518,13 +539,10 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
u8 sync_cnfg;
int err;
- if (enable == channel->sync_tod_on) {
- if (enable && sync_tod_timeout) {
- mod_delayed_work(system_wq, &channel->sync_tod_work,
- sync_tod_timeout * HZ);
- }
- return 0;
- }
+ /* Turn it off after sync_tod_timeout seconds */
+ if (enable && sync_tod_timeout)
+ ptp_schedule_worker(channel->ptp_clock,
+ sync_tod_timeout * HZ);
err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
&sync_cnfg, sizeof(sync_cnfg));
@@ -532,29 +550,17 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
return err;
sync_cnfg &= ~SYNC_TOD;
-
if (enable)
sync_cnfg |= SYNC_TOD;
- err = idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
- &sync_cnfg, sizeof(sync_cnfg));
- if (err)
- return err;
-
- channel->sync_tod_on = enable;
-
- if (enable && sync_tod_timeout) {
- mod_delayed_work(system_wq, &channel->sync_tod_work,
- sync_tod_timeout * HZ);
- }
-
- return 0;
+ return idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
}
-static void idt82p33_sync_tod_work_handler(struct work_struct *work)
+static long idt82p33_sync_tod_work_handler(struct ptp_clock_info *ptp)
{
struct idt82p33_channel *channel =
- container_of(work, struct idt82p33_channel, sync_tod_work.work);
+ container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
mutex_lock(&idt82p33->reg_lock);
@@ -562,35 +568,46 @@ static void idt82p33_sync_tod_work_handler(struct work_struct *work)
(void)idt82p33_sync_tod(channel, false);
mutex_unlock(&idt82p33->reg_lock);
+
+ /* Return a negative value here to not reschedule */
+ return -1;
}
-static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
+static int idt82p33_output_enable(struct idt82p33_channel *channel,
+ bool enable, unsigned int outn)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
- u8 mask, outn, val;
int err;
+ u8 val;
+
+ err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
+ if (err)
+ return err;
+ if (enable)
+ val &= ~SQUELCH_ENABLE;
+ else
+ val |= SQUELCH_ENABLE;
+
+ return idt82p33_write(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
+}
+
+static int idt82p33_output_mask_enable(struct idt82p33_channel *channel,
+ bool enable)
+{
+ u16 mask;
+ int err;
+ u8 outn;
mask = channel->output_mask;
outn = 0;
while (mask) {
if (mask & 0x1) {
- err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn),
- &val, sizeof(val));
- if (err)
- return err;
-
- if (enable)
- val &= ~SQUELCH_ENABLE;
- else
- val |= SQUELCH_ENABLE;
-
- err = idt82p33_write(idt82p33, OUT_MUX_CNFG(outn),
- &val, sizeof(val));
-
+ err = idt82p33_output_enable(channel, enable, outn);
if (err)
return err;
}
+
mask >>= 0x1;
outn++;
}
@@ -598,6 +615,20 @@ static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
return 0;
}
+static int idt82p33_perout_enable(struct idt82p33_channel *channel,
+ bool enable,
+ struct ptp_perout_request *perout)
+{
+ unsigned int flags = perout->flags;
+
+ /* Enable/disable output based on output_mask */
+ if (flags == PEROUT_ENABLE_OUTPUT_MASK)
+ return idt82p33_output_mask_enable(channel, enable);
+
+ /* Enable/disable individual output instead */
+ return idt82p33_output_enable(channel, enable, perout->index);
+}
+
static int idt82p33_enable_tod(struct idt82p33_channel *channel)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
@@ -611,15 +642,13 @@ static int idt82p33_enable_tod(struct idt82p33_channel *channel)
if (err)
return err;
- err = idt82p33_pps_enable(channel, false);
-
- if (err)
- return err;
-
err = idt82p33_measure_tod_write_overhead(channel);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
+ }
err = _idt82p33_settime(channel, &ts);
@@ -638,10 +667,8 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
channel = &idt82p33->channel[i];
- if (channel->ptp_clock) {
+ if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
- cancel_delayed_work_sync(&channel->sync_tod_work);
- }
}
}
@@ -659,14 +686,15 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
if (rq->type == PTP_CLK_REQ_PEROUT) {
if (!on)
- err = idt82p33_pps_enable(channel, false);
-
+ err = idt82p33_perout_enable(channel, false,
+ &rq->perout);
/* Only accept a 1-PPS aligned to the second. */
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
rq->perout.period.nsec) {
err = -ERANGE;
} else
- err = idt82p33_pps_enable(channel, true);
+ err = idt82p33_perout_enable(channel, true,
+ &rq->perout);
}
mutex_unlock(&idt82p33->reg_lock);
@@ -674,6 +702,48 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
return err;
}
+static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ s64 offset_regval, offset_fs;
+ u8 val[4] = {0};
+ int err;
+
+ offset_fs = (s64)(-offset_ns) * 1000000;
+
+ if (offset_fs > WRITE_PHASE_OFFSET_LIMIT)
+ offset_fs = WRITE_PHASE_OFFSET_LIMIT;
+ else if (offset_fs < -WRITE_PHASE_OFFSET_LIMIT)
+ offset_fs = -WRITE_PHASE_OFFSET_LIMIT;
+
+ /* Convert from phaseoffset_fs to register value */
+ offset_regval = div_s64(offset_fs * 1000, IDT_T0DPLL_PHASE_RESOL);
+
+ val[0] = offset_regval & 0xFF;
+ val[1] = (offset_regval >> 8) & 0xFF;
+ val[2] = (offset_regval >> 16) & 0xFF;
+ val[3] = (offset_regval >> 24) & 0x1F;
+ val[3] |= PH_OFFSET_EN;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH);
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
+ goto out;
+ }
+
+ err = idt82p33_write(idt82p33, channel->dpll_phase_cnfg, val,
+ sizeof(val));
+
+out:
+ mutex_unlock(&idt82p33->reg_lock);
+ return err;
+}
+
static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct idt82p33_channel *channel =
@@ -683,6 +753,9 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
mutex_lock(&idt82p33->reg_lock);
err = _idt82p33_adjfine(channel, scaled_ppm);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
return err;
@@ -706,10 +779,15 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
if (err) {
mutex_unlock(&idt82p33->reg_lock);
+ dev_err(&idt82p33->client->dev,
+ "Adjtime failed in %s with err %d!\n", __func__, err);
return err;
}
err = idt82p33_sync_tod(channel, true);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Sync_tod failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
@@ -725,6 +803,9 @@ static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
mutex_lock(&idt82p33->reg_lock);
err = _idt82p33_gettime(channel, ts);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
return err;
@@ -740,6 +821,9 @@ static int idt82p33_settime(struct ptp_clock_info *ptp,
mutex_lock(&idt82p33->reg_lock);
err = _idt82p33_settime(channel, ts);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
return err;
@@ -772,9 +856,6 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
return -EINVAL;
}
- INIT_DELAYED_WORK(&channel->sync_tod_work,
- idt82p33_sync_tod_work_handler);
- channel->sync_tod_on = false;
channel->current_freq_ppb = 0;
return 0;
@@ -784,11 +865,14 @@ static void idt82p33_caps_init(struct ptp_clock_info *caps)
{
caps->owner = THIS_MODULE;
caps->max_adj = 92000;
+ caps->n_per_out = 11;
+ caps->adjphase = idt82p33_adjwritephase;
caps->adjfine = idt82p33_adjfine;
caps->adjtime = idt82p33_adjtime;
caps->gettime64 = idt82p33_gettime;
caps->settime64 = idt82p33_settime;
caps->enable = idt82p33_enable;
+ caps->do_aux_work = idt82p33_sync_tod_work_handler;
}
static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
@@ -802,23 +886,18 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
channel = &idt82p33->channel[index];
err = idt82p33_channel_init(channel, index);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Channel_init failed in %s with err %d!\n",
+ __func__, err);
return err;
+ }
channel->idt82p33 = idt82p33;
idt82p33_caps_init(&channel->caps);
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT 82P33 PLL%u", index);
- channel->caps.n_per_out = hweight8(channel->output_mask);
-
- err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
- if (err)
- return err;
-
- err = idt82p33_enable_tod(channel);
- if (err)
- return err;
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
@@ -831,6 +910,22 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Dpll_set_mode failed in %s with err %d!\n",
+ __func__, err);
+ return err;
+ }
+
+ err = idt82p33_enable_tod(channel);
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Enable_tod failed in %s with err %d!\n",
+ __func__, err);
+ return err;
+ }
+
dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
@@ -850,8 +945,11 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
+ }
dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
@@ -935,8 +1033,12 @@ static int idt82p33_probe(struct i2c_client *client,
for (i = 0; i < MAX_PHC_PLL; i++) {
if (idt82p33->pll_mask & (1 << i)) {
err = idt82p33_enable_channel(idt82p33, i);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n",
+ __func__, err);
break;
+ }
}
}
} else {
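The private delayed_work is gone because the PTP core provides its own kthread: setting caps->do_aux_work and calling ptp_schedule_worker() arms it, and the handler's return value is the re-arm delay in jiffies, with a negative value meaning "run once". The contract as a sketch:

	static long example_do_aux_work(struct ptp_clock_info *ptp)
	{
		/* ...deferred work, here: switching SYNC_TOD back off... */

		return -1;	/* negative: do not reschedule */
	}

	/* arm it once, e.g. from the enable path: */
	static void example_arm(struct ptp_clock *clock, unsigned int seconds)
	{
		ptp_schedule_worker(clock, seconds * HZ);	/* delay in jiffies */
	}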
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
index 9d46966d25f1..1c7a0f0872e8 100644
--- a/drivers/ptp/ptp_idt82p33.h
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -56,6 +56,8 @@
#define PLL_MODE_SHIFT (0)
#define PLL_MODE_MASK (0x1F)
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+
enum pll_mode {
PLL_MODE_MIN = 0,
PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
@@ -93,6 +95,7 @@ enum hw_tod_trig_sel {
#define MAX_MEASURMENT_COUNT (5)
#define SNAP_THRESHOLD_NS (150000)
#define SYNC_TOD_TIMEOUT_SEC (5)
+#define IDT82P33_MAX_WRITE_COUNT (512)
#define PLLMASK_ADDR_HI 0xFF
#define PLLMASK_ADDR_LO 0xA5
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f73b4756ed5e..32ea41b4356b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -701,6 +701,19 @@ enum qeth_pnso_mode {
QETH_PNSO_ADDR_INFO,
};
+enum qeth_link_mode {
+ QETH_LINK_MODE_UNKNOWN,
+ QETH_LINK_MODE_FIBRE_SHORT,
+ QETH_LINK_MODE_FIBRE_LONG,
+};
+
+struct qeth_link_info {
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ enum qeth_link_mode link_mode;
+};
+
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
struct qeth_card_info {
@@ -732,6 +745,7 @@ struct qeth_card_info {
struct qeth_card_blkt blkt;
__u32 diagass_support;
__u32 hwtrap;
+ struct qeth_link_info link_info;
};
enum qeth_discipline_id {
@@ -796,12 +810,6 @@ struct qeth_rx {
u8 bufs_refill;
};
-struct carrier_info {
- __u8 card_type;
- __u16 port_mode;
- __u32 port_speed;
-};
-
struct qeth_switch_info {
__u32 capabilities;
__u32 settings;
@@ -1108,7 +1116,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info);
+ struct qeth_link_info *link_info);
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
enum qeth_ipa_isolation_modes mode);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 93c9b30ab17a..d9ba0586373b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4868,8 +4868,8 @@ out_free:
static int qeth_query_card_info_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ struct qeth_link_info *link_info = reply->param;
struct qeth_query_card_info *card_info;
QETH_CARD_TEXT(card, 2, "qcrdincb");
@@ -4877,14 +4877,67 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
return -EIO;
card_info = &cmd->data.setadapterparms.data.card_info;
- carrier_info->card_type = card_info->card_type;
- carrier_info->port_mode = card_info->port_mode;
- carrier_info->port_speed = card_info->port_speed;
+ netdev_dbg(card->dev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ card_info->card_type, card_info->port_mode,
+ card_info->port_speed);
+
+ switch (card_info->port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ default:
+ link_info->duplex = DUPLEX_UNKNOWN;
+ }
+
+ switch (card_info->card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_TP;
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ break;
+ default:
+ switch (card_info->port_speed) {
+ case CARD_INFO_PORTS_10M:
+ link_info->speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ link_info->speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ link_info->speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ link_info->speed = SPEED_10000;
+ break;
+ case CARD_INFO_PORTS_25G:
+ link_info->speed = SPEED_25000;
+ break;
+ default:
+ link_info->speed = SPEED_UNKNOWN;
+ }
+
+ link_info->port = PORT_OTHER;
+ }
+
return 0;
}
int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info)
+ struct qeth_link_info *link_info)
{
struct qeth_cmd_buffer *iob;
@@ -4894,8 +4947,162 @@ int qeth_query_card_info(struct qeth_card *card,
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
if (!iob)
return -ENOMEM;
- return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
- (void *)carrier_info);
+
+ return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
+}
+
+static int qeth_init_link_info_oat_cb(struct qeth_card *card,
+ struct qeth_reply *reply_priv,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ struct qeth_link_info *link_info = reply_priv->param;
+ struct qeth_query_oat_physical_if *phys_if;
+ struct qeth_query_oat_reply *reply;
+
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return -EIO;
+
+ /* Multi-part reply is unexpected, don't bother: */
+ if (cmd->data.setadapterparms.hdr.used_total > 1)
+ return -EINVAL;
+
+ /* Expect the reply to start with phys_if data: */
+ reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
+ if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
+ reply->length < sizeof(*reply))
+ return -EINVAL;
+
+ phys_if = &reply->phys_if;
+
+ switch (phys_if->speed_duplex) {
+ case QETH_QOAT_PHYS_SPEED_10M_HALF:
+ link_info->speed = SPEED_10;
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ case QETH_QOAT_PHYS_SPEED_10M_FULL:
+ link_info->speed = SPEED_10;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_100M_HALF:
+ link_info->speed = SPEED_100;
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ case QETH_QOAT_PHYS_SPEED_100M_FULL:
+ link_info->speed = SPEED_100;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_1000M_HALF:
+ link_info->speed = SPEED_1000;
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ case QETH_QOAT_PHYS_SPEED_1000M_FULL:
+ link_info->speed = SPEED_1000;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_10G_FULL:
+ link_info->speed = SPEED_10000;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_25G_FULL:
+ link_info->speed = SPEED_25000;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_UNKNOWN:
+ default:
+ link_info->speed = SPEED_UNKNOWN;
+ link_info->duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+
+ switch (phys_if->media_type) {
+ case QETH_QOAT_PHYS_MEDIA_COPPER:
+ link_info->port = PORT_TP;
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ break;
+ case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ break;
+ case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
+ break;
+ default:
+ link_info->port = PORT_OTHER;
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static void qeth_init_link_info(struct qeth_card *card)
+{
+ card->info.link_info.duplex = DUPLEX_FULL;
+
+ if (IS_IQD(card) || IS_VM_NIC(card)) {
+ card->info.link_info.speed = SPEED_10000;
+ card->info.link_info.port = PORT_FIBRE;
+ card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ } else {
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ card->info.link_info.speed = SPEED_100;
+ card->info.link_info.port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ card->info.link_info.speed = SPEED_1000;
+ card->info.link_info.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ card->info.link_info.speed = SPEED_10000;
+ card->info.link_info.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ card->info.link_info.speed = SPEED_25000;
+ card->info.link_info.port = PORT_FIBRE;
+ break;
+ default:
+ dev_info(&card->gdev->dev, "Unknown link type %x\n",
+ card->info.link_type);
+ card->info.link_info.speed = SPEED_UNKNOWN;
+ card->info.link_info.port = PORT_OTHER;
+ }
+
+ card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
+ }
+
+ /* Get more accurate data via QUERY OAT: */
+ if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
+ struct qeth_link_info link_info;
+ struct qeth_cmd_buffer *iob;
+
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
+ SETADP_DATA_SIZEOF(query_oat));
+ if (iob) {
+ struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
+ struct qeth_query_oat *oat_req;
+
+ oat_req = &cmd->data.setadapterparms.data.query_oat;
+ oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
+
+ if (!qeth_send_ipa_cmd(card, iob,
+ qeth_init_link_info_oat_cb,
+ &link_info)) {
+ if (link_info.speed != SPEED_UNKNOWN)
+ card->info.link_info.speed = link_info.speed;
+ if (link_info.duplex != DUPLEX_UNKNOWN)
+ card->info.link_info.duplex = link_info.duplex;
+ if (link_info.port != PORT_OTHER)
+ card->info.link_info.port = link_info.port;
+ if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
+ card->info.link_info.link_mode = link_info.link_mode;
+ }
+ }
+ }
}
/**
@@ -5282,6 +5489,8 @@ retriable:
goto out;
}
+ qeth_init_link_info(card);
+
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "9err%d", rc);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6541bab96822..e4bde7daf083 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -489,9 +489,45 @@ struct qeth_set_access_ctrl {
__u8 reserved[8];
} __attribute__((packed));
+#define QETH_QOAT_PHYS_SPEED_UNKNOWN 0x00
+#define QETH_QOAT_PHYS_SPEED_10M_HALF 0x01
+#define QETH_QOAT_PHYS_SPEED_10M_FULL 0x02
+#define QETH_QOAT_PHYS_SPEED_100M_HALF 0x03
+#define QETH_QOAT_PHYS_SPEED_100M_FULL 0x04
+#define QETH_QOAT_PHYS_SPEED_1000M_HALF 0x05
+#define QETH_QOAT_PHYS_SPEED_1000M_FULL 0x06
+// n/a 0x07
+#define QETH_QOAT_PHYS_SPEED_10G_FULL 0x08
+// n/a 0x09
+#define QETH_QOAT_PHYS_SPEED_25G_FULL 0x0A
+
+#define QETH_QOAT_PHYS_MEDIA_COPPER 0x01
+#define QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT 0x02
+#define QETH_QOAT_PHYS_MEDIA_FIBRE_LONG 0x04
+
+struct qeth_query_oat_physical_if {
+ u8 res_head[33];
+ u8 speed_duplex;
+ u8 media_type;
+ u8 res_tail[29];
+};
+
+#define QETH_QOAT_REPLY_TYPE_PHYS_IF 0x0004
+
+struct qeth_query_oat_reply {
+ u16 type;
+ u16 length;
+ u16 version;
+ u8 res[10];
+ struct qeth_query_oat_physical_if phys_if;
+};
+
+#define QETH_QOAT_SCOPE_INTERFACE 0x00000001
+
struct qeth_query_oat {
- __u32 subcmd_code;
- __u8 reserved[12];
+ u32 subcmd_code;
+ u8 reserved[12];
+ struct qeth_query_oat_reply reply[];
} __packed;
struct qeth_qoat_priv {
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index b5caa723326e..3a51bbff0ffe 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -324,8 +324,8 @@ static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
/* specified port type. */
-static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
- int maxspeed, int porttype)
+static void qeth_set_ethtool_link_modes(struct ethtool_link_ksettings *cmd,
+ enum qeth_link_mode link_mode)
{
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
@@ -334,186 +334,119 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- switch (porttype) {
+ switch (cmd->base.port) {
case PORT_TP:
ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ switch (cmd->base.speed) {
+ case SPEED_10000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+ fallthrough;
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Half);
+ fallthrough;
+ case SPEED_100:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+ fallthrough;
+ case SPEED_10:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ break;
+ default:
+ break;
+ }
+
break;
case PORT_FIBRE:
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
- break;
- default:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- WARN_ON_ONCE(1);
- }
- /* partially does fall through, to also select lower speeds */
- switch (maxspeed) {
- case SPEED_25000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 25000baseSR_Full);
- break;
- case SPEED_10000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10000baseT_Full);
- fallthrough;
- case SPEED_1000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Half);
- fallthrough;
- case SPEED_100:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Half);
- fallthrough;
- case SPEED_10:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
+ switch (cmd->base.speed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
+ case SPEED_10000:
+ if (link_mode == QETH_LINK_MODE_FIBRE_LONG) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseLR_Full);
+ } else if (link_mode == QETH_LINK_MODE_FIBRE_SHORT) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseSR_Full);
+ }
+ break;
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseX_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseX_Full);
+ break;
+ default:
+ break;
+ }
+
break;
default:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
- WARN_ON_ONCE(1);
+ break;
}
}
-
static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
- enum qeth_link_types link_type;
- struct carrier_info carrier_info;
- int rc;
+ struct qeth_link_info link_info;
- if (IS_IQD(card) || IS_VM_NIC(card))
- link_type = QETH_LINK_TYPE_10GBIT_ETH;
- else
- link_type = card->info.link_type;
-
- cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = card->info.link_info.speed;
+ cmd->base.duplex = card->info.link_info.duplex;
+ cmd->base.port = card->info.link_info.port;
cmd->base.autoneg = AUTONEG_ENABLE;
cmd->base.phy_address = 0;
cmd->base.mdio_support = 0;
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- switch (link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- cmd->base.speed = SPEED_100;
- cmd->base.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- cmd->base.speed = SPEED_1000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- cmd->base.speed = SPEED_10000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- cmd->base.speed = SPEED_25000;
- cmd->base.port = PORT_FIBRE;
- break;
- default:
- cmd->base.speed = SPEED_10;
- cmd->base.port = PORT_TP;
- }
- qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
-
/* Check if we can obtain more accurate information. */
- /* If QUERY_CARD_INFO command is not supported or fails, */
- /* just return the heuristics that was filled above. */
- rc = qeth_query_card_info(card, &carrier_info);
- if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
- return 0;
- if (rc) /* report error from the hardware operation */
- return rc;
- /* on success, fill in the information got from the hardware */
-
- netdev_dbg(netdev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- carrier_info.card_type,
- carrier_info.port_mode,
- carrier_info.port_speed);
-
- /* Update attributes for which we've obtained more authoritative */
- /* information, leave the rest the way they where filled above. */
- switch (carrier_info.card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- cmd->base.port = PORT_TP;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
- break;
- }
-
- switch (carrier_info.port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- cmd->base.duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- cmd->base.duplex = DUPLEX_HALF;
- break;
+ if (!qeth_query_card_info(card, &link_info)) {
+ if (link_info.speed != SPEED_UNKNOWN)
+ cmd->base.speed = link_info.speed;
+ if (link_info.duplex != DUPLEX_UNKNOWN)
+ cmd->base.duplex = link_info.duplex;
+ if (link_info.port != PORT_OTHER)
+ cmd->base.port = link_info.port;
}
- switch (carrier_info.port_speed) {
- case CARD_INFO_PORTS_10M:
- cmd->base.speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- cmd->base.speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- cmd->base.speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- cmd->base.speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- cmd->base.speed = SPEED_25000;
- break;
- }
+ qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 28f6dda95736..daeb837abec3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -737,8 +737,6 @@ static void qeth_l2_dev2br_an_set_cb(void *priv,
*
* On enable, emits a series of address notifications for all
* currently registered hosts.
- *
- * Must be called under rtnl_lock
*/
static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
{
@@ -1289,16 +1287,19 @@ static void qeth_l2_dev2br_worker(struct work_struct *work)
if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
goto free;
- /* Potential re-config in progress, try again later: */
- if (!rtnl_trylock()) {
- queue_delayed_work(card->event_wq, dwork,
- msecs_to_jiffies(100));
- return;
- }
- if (!netif_device_present(card->dev))
- goto out_unlock;
-
if (data->ac_event.lost_event_mask) {
+ /* Potential re-config in progress, try again later: */
+ if (!rtnl_trylock()) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+
+ if (!netif_device_present(card->dev)) {
+ rtnl_unlock();
+ goto free;
+ }
+
QETH_DBF_MESSAGE(3,
"Address change notification overflow on device %x\n",
CARD_DEVID(card));
@@ -1328,6 +1329,8 @@ static void qeth_l2_dev2br_worker(struct work_struct *work)
"Address Notification resynced on device %x\n",
CARD_DEVID(card));
}
+
+ rtnl_unlock();
} else {
for (i = 0; i < data->ac_event.num_entries; i++) {
struct qeth_ipacmd_addr_change_entry *entry =
@@ -1339,9 +1342,6 @@ static void qeth_l2_dev2br_worker(struct work_struct *work)
}
}
-out_unlock:
- rtnl_unlock();
-
free:
kfree(data);
}
@@ -2310,11 +2310,8 @@ static void qeth_l2_set_offline(struct qeth_card *card)
card->state = CARD_STATE_DOWN;
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
- if (priv->brport_features & BR_LEARNING_SYNC) {
- rtnl_lock();
+ if (priv->brport_features & BR_LEARNING_SYNC)
qeth_l2_dev2br_fdb_flush(card);
- rtnl_unlock();
- }
}
/* Returns zero if the command is successfully "consumed" */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b1c1d2510d55..264b6c782382 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -104,10 +104,7 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
(ipatoe->proto == QETH_PROT_IPV4) ?
4 : 16);
- if (addr->proto == QETH_PROT_IPV4)
- rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
- else
- rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
if (rc)
break;
}
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 9888a7061873..101def7dc73d 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1159,7 +1159,7 @@ static u32 fq_to_tag(struct qman_fq *fq)
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit);
+ unsigned int poll_limit, bool sched_napi);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);
@@ -1174,7 +1174,7 @@ static irqreturn_t portal_isr(int irq, void *ptr)
/* DQRR-handling if it's interrupt-driven */
if (is & QM_PIRQ_DQRI) {
- __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ __poll_portal_fast(p, QMAN_POLL_LIMIT, true);
clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
}
/* Handling of anything else that's interrupt-driven */
@@ -1602,7 +1602,7 @@ static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
* user callbacks to call into any QMan API.
*/
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit)
+ unsigned int poll_limit, bool sched_napi)
{
const struct qm_dqrr_entry *dq;
struct qman_fq *fq;
@@ -1636,7 +1636,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
* and we don't want multiple if()s in the critical
* path (SDQCR).
*/
- res = fq->cb.dqrr(p, fq, dq);
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
if (res == qman_cb_dqrr_stop)
break;
/* Check for VDQCR completion */
@@ -1646,7 +1646,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
/* SDQCR: context_b points to the FQ */
fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
- res = fq->cb.dqrr(p, fq, dq);
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
/*
* The callback can request that we exit without
* consuming this entry nor advancing;
@@ -1753,7 +1753,7 @@ EXPORT_SYMBOL(qman_start_using_portal);
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
- return __poll_portal_fast(p, limit);
+ return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 7066b2f1467c..28fbddc3c204 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -45,7 +45,8 @@
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
struct qman_fq *,
- const struct qm_dqrr_entry *);
+ const struct qm_dqrr_entry *,
+ bool sched_napi);
static void cb_ern(struct qman_portal *, struct qman_fq *,
const union qm_mr_entry *);
static void cb_fqs(struct qman_portal *, struct qman_fq *,
@@ -208,7 +209,8 @@ failed:
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index e87b65403b67..b7e8e5ec884c 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -275,7 +275,8 @@ static inline int process_frame_data(struct hp_handler *handler,
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct hp_handler *handler = (struct hp_handler *)fq;
@@ -293,7 +294,8 @@ skip:
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct hp_handler *handler = (struct hp_handler *)fq;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2d0310448eba..443ca3f3cdf0 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,6 +114,8 @@ source "drivers/staging/kpc2000/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/wimax/Kconfig"
+
source "drivers/staging/wfx/Kconfig"
source "drivers/staging/hikey9xx/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 757a892ab5b9..dc45128ef525 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,5 +47,6 @@ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_WFX) += wfx/
obj-y += hikey9xx/
diff --git a/drivers/staging/wimax/Documentation/i2400m.rst b/drivers/staging/wimax/Documentation/i2400m.rst
new file mode 100644
index 000000000000..194388c0c351
--- /dev/null
+++ b/drivers/staging/wimax/Documentation/i2400m.rst
@@ -0,0 +1,283 @@
+.. include:: <isonum.txt>
+
+====================================================
+Driver for the Intel Wireless Wimax Connection 2400m
+====================================================
+
+:Copyright: |copy| 2008 Intel Corporation < linux-wimax@intel.com >
+
+ This provides a driver for the Intel Wireless WiMAX Connection 2400m
+ and a basic Linux kernel WiMAX stack.
+
+1. Requirements
+===============
+
+ * Linux installation with Linux kernel 2.6.22 or newer (if building
+ from a separate tree)
+ * Intel i2400m Echo Peak or Baxter Peak; this includes the Intel
+ Wireless WiMAX/WiFi Link 5x50 series.
+ * build tools:
+
+ + Linux kernel development package for the target kernel; to
+ build against your currently running kernel, you need to have
+ the kernel development package corresponding to the running
+ image installed (usually if your kernel is named
+ linux-VERSION, the development package is called
+ linux-dev-VERSION or linux-headers-VERSION).
+ + GNU C Compiler, make
+
+2. Compilation and installation
+===============================
+
+2.1. Compilation of the drivers included in the kernel
+------------------------------------------------------
+
+ Configure the kernel; to enable the WiMAX drivers select Drivers >
+ Networking Drivers > WiMAX device support. Enable all of them as
+ modules (easier).
+
+ If USB or SDIO are not enabled in the kernel configuration, the options
+ to build the i2400m USB or SDIO drivers will not show. Enable said
+ subsystems and go back to the WiMAX menu to enable the drivers.
+
+ Compile and install your kernel as usual.
+
+2.2. Compilation of the drivers distributed as a standalone module
+-------------------------------------------------------------------
+
+ To compile::
+
+ $ cd source/directory
+ $ make
+
+ Once built, you can load and unload the modules using the provided
+ load.sh script; load.sh will load the modules, and load.sh u will
+ unload them.
+
+ To install in the default kernel directories (and enable auto loading
+ when the device is plugged in)::
+
+ $ make install
+ $ depmod -a
+
+ If your kernel development files are located in a non-standard
+ directory or if you want to build for a kernel that is not the
+ currently running one, set KDIR to the right location::
+
+ $ make KDIR=/path/to/kernel/dev/tree
+
+ For more information, please contact linux-wimax@intel.com.
+
+3. Installing the firmware
+==========================
+
+ The firmware can be obtained from http://linuxwimax.org or might have
+ been supplied with your hardware.
+
+ It has to be installed in the target system::
+
+ $ cp FIRMWAREFILE.sbcf /lib/firmware/i2400m-fw-BUSTYPE-1.3.sbcf
+
+ * NOTE: if your firmware came in an .rpm or .deb file, just install
+ it as normal, with the rpm (rpm -i FIRMWARE.rpm) or dpkg
+ (dpkg -i FIRMWARE.deb) commands. No further action is needed.
+ * BUSTYPE will be usb or sdio, depending on the hardware you have.
+ Each hardware type comes with its own firmware and will not work
+ with other types.
+
+4. Design
+=========
+
+ This package contains two major parts: a WiMAX kernel stack and a
+ driver for the Intel i2400m.
+
+ The WiMAX stack is designed to provide common WiMAX control
+ services to current and future WiMAX devices from any vendor; please
+ see README.wimax for details.
+
+ The i2400m kernel driver is broken up into two main parts: the
+ bus-generic driver and the bus-specific drivers. The bus-generic
+ driver forms the driver core and contains no knowledge of the actual
+ method used to connect to the device. The bus-specific drivers are
+ just the glue to connect the bus-generic driver and the device.
+ Currently only USB and SDIO are supported. See
+ drivers/net/wimax/i2400m/i2400m.h for more information.
+
+ The bus-generic driver is logically broken up into two parts: OS-glue
+ and hardware-glue. The OS-glue interfaces with Linux; the
+ hardware-glue interfaces with the device using an interface provided
+ by the bus-specific driver. The reason for this split is to make it
+ easy to reuse the hardware-glue to write drivers for other OSes. Note
+ that the hardware-glue part is written as a native Linux driver with
+ no abstraction layers, so to port to another OS, the Linux kernel API
+ calls would need to be replaced with the target OS's.
+
+5. Usage
+========
+
+ To load the driver, follow the instructions in the install section;
+ once the driver is loaded, plug in the device (unless it is permanently
+ plugged in). The driver will enumerate the device, upload the firmware
+ and output messages in the kernel log (dmesg, /var/log/messages or
+ /var/log/kern.log) such as::
+
+ ...
+ i2400m_usb 5-4:1.0: firmware interface version 8.0.0
+ i2400m_usb 5-4:1.0: WiMAX interface wmx0 (00:1d:e1:01:94:2c) ready
+
+ At this point the device is ready to work.
+
+ Current versions require the Intel WiMAX Network Service in userspace
+ to make things work. See the network service's README for instructions
+ on how to scan, connect and disconnect.
+
+5.1. Module parameters
+----------------------
+
+ Module parameters can be set at kernel or module load time or by
+ echoing values::
+
+ $ echo VALUE > /sys/module/MODULENAME/parameters/PARAMETERNAME
+
+ To make changes permanent, for example, for the i2400m module, you can
+ also create a file named /etc/modprobe.d/i2400m containing::
+
+ options i2400m idle_mode_disabled=1
+
+ To find which parameters are supported by a module, run::
+
+ $ modinfo path/to/module.ko
+
+ During kernel bootup (if the driver is linked into the kernel), specify
+ the following to the kernel command line::
+
+ i2400m.PARAMETER=VALUE
+
+5.1.1. i2400m: idle_mode_disabled
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The i2400m module supports a parameter to disable idle mode. This
+ parameter, once set, will take effect only when the device is
+ reinitialized by the driver (e.g., following a reset or a reconnect).
+
+5.2. Debug operations: debugfs entries
+--------------------------------------
+
+ The driver will register debugfs entries that allow the user to tweak
+ debug settings. There are three main container directories where
+ entries are placed, which correspond to the three blocks an i2400m WiMAX
+ driver has:
+
+ * /sys/kernel/debug/wimax:DEVNAME/ for the generic WiMAX stack
+ controls
+ * /sys/kernel/debug/wimax:DEVNAME/i2400m for the i2400m generic
+ driver controls
+ * /sys/kernel/debug/wimax:DEVNAME/i2400m-usb (or -sdio) for the
+ bus-specific i2400m-usb or i2400m-sdio controls.
+
+ Of course, if debugfs is mounted in a directory other than
+ /sys/kernel/debug, those paths will change.
+
+5.2.1. Increasing debug output
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The files named *dl_* indicate knobs for controlling the debug output
+ of different submodules::
+
+ # find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_tx
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_rx
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_notif
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_fw
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_usb
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_rx
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_rfkill
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_netdev
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_fw
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_debugfs
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_driver
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_control
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+
+ By reading the file you can obtain the current value of said debug
+ level; by writing to it, you can set it.
+
+ To increase the debug level of, for example, the i2400m's generic TX
+ engine, just write::
+
+ $ echo 3 > /sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+
+ Increasing numbers yield increasing debug information; for details of
+ what is printed and the available levels, check the source. The code
+ uses 0 for disabled and increasing levels of verbosity up to 8.
+
+5.2.2. RX and TX statistics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The i2400m/rx_stats and i2400m/tx_stats files provide statistics
+ about data reception from / delivery to the device::
+
+ $ cat /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+ 45 1 3 34 3104 48 480
+
+ The numbers reported are:
+
+ * packets/RX-buffer: total, min, max
+ * RX-buffers: total RX buffers received, accumulated RX buffer size
+ in bytes, min size received, max size received
+
+ Thus, to find the average buffer size received, divide the
+ accumulated RX-buffer size by the total number of RX-buffers.
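+
+ For the sample output above, that works out to 3104 bytes / 34
+ RX-buffers, i.e. roughly 91 bytes per buffer, each buffer carrying
+ on average 45 / 34 (about 1.3) packets.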
+
+ To clear the statistics back to 0, write anything to the rx_stats file::
+
+ $ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+
+ Likewise for TX.
+
+ Note the packets this debug file refers to are not network packets,
+ but packets in the sense of the device-specific protocol for
+ communication to the host. See drivers/net/wimax/i2400m/tx.c.
+
+5.2.3. Tracing messages received from user space
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ To echo messages received from user space into the trace pipe that the
+ i2400m driver creates, set the debug file i2400m/trace_msg_from_user to
+ 1::
+
+ $ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/trace_msg_from_user
+
+5.2.4. Performing a device reset
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ By writing a 0, a 1 or a 2 to the file
+ /sys/kernel/debug/wimax:wmx0/reset, the driver performs a warm (without
+ disconnecting from the bus), cold (disconnecting from the bus) or bus
+ (bus specific) reset on the device.
+
+5.2.5. Asking the device to enter power saving mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ By writing any value to the /sys/kernel/debug/wimax:wmx0 file, the
+ device will attempt to enter power saving mode.
+
+6. Troubleshooting
+==================
+
+6.1. Driver complains about ``i2400m-fw-usb-1.3.sbcf: request failed``
+----------------------------------------------------------------------
+
+ If upon connecting the device, the following is output in the kernel
+ log::
+
+ i2400m_usb 5-4:1.0: fw i2400m-fw-usb-1.3.sbcf: request failed: -2
+
+ This means that the driver cannot locate the firmware file named
+ /lib/firmware/i2400m-fw-usb-1.3.sbcf. Check that the file is present in
+ the right location.
diff --git a/drivers/staging/wimax/Documentation/index.rst b/drivers/staging/wimax/Documentation/index.rst
new file mode 100644
index 000000000000..fdf7c1f99ff5
--- /dev/null
+++ b/drivers/staging/wimax/Documentation/index.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+WiMAX subsystem
+===============
+
+.. toctree::
+ :maxdepth: 2
+
+ wimax
+
+ i2400m
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/drivers/staging/wimax/Documentation/wimax.rst b/drivers/staging/wimax/Documentation/wimax.rst
new file mode 100644
index 000000000000..817ee8ba2732
--- /dev/null
+++ b/drivers/staging/wimax/Documentation/wimax.rst
@@ -0,0 +1,89 @@
+.. include:: <isonum.txt>
+
+========================
+Linux kernel WiMAX stack
+========================
+
+:Copyright: |copy| 2008 Intel Corporation < linux-wimax@intel.com >
+
+ This provides a basic Linux kernel WiMAX stack to provide a common
+ control API for WiMAX devices, usable from kernel and user space.
+
+1. Design
+=========
+
+ The WiMAX stack is designed to provide common WiMAX control
+ services to current and future WiMAX devices from any vendor.
+
+ Because there is currently only one driver and it is not yet clear
+ what the common services will be, the APIs provided are very minimal.
+ However, the stack is designed so that it is easily extensible to
+ accommodate future requirements.
+
+ The stack works by embedding a struct wimax_dev in your device's
+ control structures. This provides a set of callbacks that the WiMAX
+ stack will call in order to implement control operations requested by
+ the user. The stack also provides API functions that the driver
+ calls to notify it of changes in the device's state.
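+
+ As a rough sketch (the my_* names below are made up for illustration,
+ while wimax_dev_init(), wimax_dev_add() and the op_* callbacks are the
+ stack's actual API), a driver embeds and registers the structure along
+ these lines::
+
+    struct my_priv {
+            struct wimax_dev wimax_dev;   /* embedded, not a pointer */
+            /* ... bus/device specific state ... */
+    };
+
+    static int my_op_reset(struct wimax_dev *wimax_dev)
+    {
+            struct my_priv *priv =
+                    container_of(wimax_dev, struct my_priv, wimax_dev);
+
+            /* ... reset the hardware ... */
+            return 0;
+    }
+
+    static int my_probe(struct my_priv *priv, struct net_device *net_dev)
+    {
+            wimax_dev_init(&priv->wimax_dev);
+            priv->wimax_dev.op_reset = my_op_reset;
+
+            /* hook the device into the stack; this also creates the
+             * wimax:DEVNAME debugfs entries described below */
+            return wimax_dev_add(&priv->wimax_dev, net_dev);
+    }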
+
+ The stack exports the API calls needed to control the device to user
+ space using generic netlink as a marshalling mechanism. You can access
+ them using your own code or use the wrappers provided for your
+ convenience in libwimax (in the wimax-tools package).
+
+ For detailed information on the stack, please see
+ include/linux/wimax.h.
+
+2. Usage
+========
+
+ For usage in a driver (registration, API, etc) please refer to the
+ instructions in the header file include/linux/wimax.h.
+
+ When a device is registered with the WiMAX stack, a set of debugfs
+ files will appear in /sys/kernel/debug/wimax:wmxX that can be
+ tweaked for control.
+
+2.1. Obtaining debug information: debugfs entries
+-------------------------------------------------
+
+ The WiMAX stack is compiled, by default, with debug messages that can
+ be used to diagnose issues; at runtime these messages are disabled by
+ default.
+
+ The drivers will register debugfs entries that allow the user to tweak
+ debug settings.
+
+ Each driver, when registering with the stack, will cause a debugfs
+ directory named wimax:DEVICENAME to be created; optionally, it might
+ create more subentries below it.
+
+2.1.1. Increasing debug output
+------------------------------
+
+ The files named *dl_* indicate knobs for controlling the debug output
+ of different submodules of the WiMAX stack::
+
+ # find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+ /sys/kernel/debug/wimax:wmx0/.... # other driver specific files
+
+ NOTE:
+ Of course, if debugfs is mounted in a directory other than
+ /sys/kernel/debug, those paths will change.
+
+ By reading the file you can obtain the current value of said debug
+ level; by writing to it, you can set it.
+
+ To increase the debug level of, for example, the id-table submodule,
+ just write::
+
+ $ echo 3 > /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+
+ Increasing numbers yield increasing debug information; for details of
+ what is printed and the available levels, check the source. The code
+ uses 0 for disabled and increasing levels of verbosity up to 8.
diff --git a/drivers/staging/wimax/Kconfig b/drivers/staging/wimax/Kconfig
new file mode 100644
index 000000000000..ded8b70b25ee
--- /dev/null
+++ b/drivers/staging/wimax/Kconfig
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# WiMAX LAN device configuration
+#
+
+menuconfig WIMAX
+ tristate "WiMAX Wireless Broadband support"
+ depends on RFKILL || !RFKILL
+ help
+
+ Select to configure support for devices that provide
+ wireless broadband connectivity using the WiMAX protocol
+ (IEEE 802.16).
+
+ Please note that most of these devices require signing up
+ for a service plan with a provider.
+
+ The different WiMAX drivers can be enabled in the menu entry
+
+ Device Drivers > Network device support > WiMAX Wireless
+ Broadband devices
+
+ If unsure, it is safe to select M (module).
+
+if WIMAX
+
+config WIMAX_DEBUG_LEVEL
+ int "WiMAX debug level"
+ depends on WIMAX
+ default 8
+ help
+
+ Select the maximum debug verbosity level to be compiled into
+ the WiMAX stack code.
+
+ By default, debug messages are disabled at runtime and can
+ be selectively enabled for different parts of the code using
+ the sysfs debug-levels file.
+
+ If set at zero, this will compile out all the debug code.
+
+ It is recommended that it is left at 8.
+
+source "drivers/staging/wimax/i2400m/Kconfig"
+
+endif
diff --git a/drivers/staging/wimax/Makefile b/drivers/staging/wimax/Makefile
new file mode 100644
index 000000000000..0e3f988656aa
--- /dev/null
+++ b/drivers/staging/wimax/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_WIMAX) += wimax.o
+
+wimax-y := \
+ id-table.o \
+ op-msg.o \
+ op-reset.o \
+ op-rfkill.o \
+ op-state-get.o \
+ stack.o
+
+wimax-$(CONFIG_DEBUG_FS) += debugfs.o
+
+obj-$(CONFIG_WIMAX_I2400M) += i2400m/
diff --git a/drivers/staging/wimax/TODO b/drivers/staging/wimax/TODO
new file mode 100644
index 000000000000..26e4cb9e9599
--- /dev/null
+++ b/drivers/staging/wimax/TODO
@@ -0,0 +1,18 @@
+There are no known users of this driver as of October 2020, and it will
+be removed unless someone turns out to still need it in future releases.
+
+According to https://en.wikipedia.org/wiki/List_of_WiMAX_networks, there
+have been many public wimax networks, but it appears that many of these
+have migrated to LTE or discontinued their service altogether. As most
+PCs and phones lack WiMAX hardware support, the remaining networks tend
+to use standalone routers. These almost certainly run Linux, but not a
+modern kernel or the mainline wimax driver stack.
+
+NetworkManager appears to have dropped userspace support in 2015
+(https://bugzilla.gnome.org/show_bug.cgi?id=747846); the www.linuxwimax.org
+site had already shut down earlier.
+
+WiMax is apparently still being deployed on airport campus networks
+("AeroMACS"), but in a frequency band that was not supported by the old
+Intel 2400m (used in Sandy Bridge laptops and earlier), which is the
+only driver using the kernel's wimax stack.
diff --git a/drivers/staging/wimax/debug-levels.h b/drivers/staging/wimax/debug-levels.h
new file mode 100644
index 000000000000..b854802d1d00
--- /dev/null
+++ b/drivers/staging/wimax/debug-levels.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX Stack
+ * Debug levels control file for the wimax module
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME wimax
+#define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL
+
+#include "linux-wimax-debug.h"
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(debugfs),
+ D_SUBMODULE_DECLARE(id_table),
+ D_SUBMODULE_DECLARE(op_msg),
+ D_SUBMODULE_DECLARE(op_reset),
+ D_SUBMODULE_DECLARE(op_rfkill),
+ D_SUBMODULE_DECLARE(op_state_get),
+ D_SUBMODULE_DECLARE(stack),
+};
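+
+/*
+ * Illustrative note (not in the original file): a source file selects
+ * its submodule before including this header, as debugfs.c does, and
+ * can then emit level-gated debug messages, roughly:
+ *
+ *	#define D_SUBMODULE op_rfkill
+ *	#include "debug-levels.h"
+ *	...
+ *	d_printf(1, dev, "rfkill state %u\n", state);
+ */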
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/staging/wimax/debugfs.c b/drivers/staging/wimax/debugfs.c
new file mode 100644
index 000000000000..e11bff61ffcf
--- /dev/null
+++ b/drivers/staging/wimax/debugfs.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Debugfs support
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+#include <linux/debugfs.h>
+#include "linux-wimax.h"
+#include "wimax-internal.h"
+
+#define D_SUBMODULE debugfs
+#include "debug-levels.h"
+
+void wimax_debugfs_add(struct wimax_dev *wimax_dev)
+{
+ struct net_device *net_dev = wimax_dev->net_dev;
+ struct dentry *dentry;
+ char buf[128];
+
+ snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
+ dentry = debugfs_create_dir(buf, NULL);
+ wimax_dev->debugfs_dentry = dentry;
+
+ d_level_register_debugfs("wimax_dl_", debugfs, dentry);
+ d_level_register_debugfs("wimax_dl_", id_table, dentry);
+ d_level_register_debugfs("wimax_dl_", op_msg, dentry);
+ d_level_register_debugfs("wimax_dl_", op_reset, dentry);
+ d_level_register_debugfs("wimax_dl_", op_rfkill, dentry);
+ d_level_register_debugfs("wimax_dl_", op_state_get, dentry);
+ d_level_register_debugfs("wimax_dl_", stack, dentry);
+}
+
+void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
+{
+ debugfs_remove_recursive(wimax_dev->debugfs_dentry);
+}
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/staging/wimax/i2400m/Kconfig
index 843b905a26a3..843b905a26a3 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/staging/wimax/i2400m/Kconfig
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/staging/wimax/i2400m/Makefile
index b1db1eff0648..b1db1eff0648 100644
--- a/drivers/net/wimax/i2400m/Makefile
+++ b/drivers/staging/wimax/i2400m/Makefile
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/staging/wimax/i2400m/control.c
index 8df98757d901..fe885aa56cf3 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/staging/wimax/i2400m/control.c
@@ -77,7 +77,7 @@
#include "i2400m.h"
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/export.h>
#include <linux/moduleparam.h>
diff --git a/drivers/net/wimax/i2400m/debug-levels.h b/drivers/staging/wimax/i2400m/debug-levels.h
index 00942bb1489b..a317e9fbb734 100644
--- a/drivers/net/wimax/i2400m/debug-levels.h
+++ b/drivers/staging/wimax/i2400m/debug-levels.h
@@ -13,7 +13,7 @@
#define D_MODULENAME i2400m
#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
-#include <linux/wimax/debug.h>
+#include "../linux-wimax-debug.h"
/* List of all the enabled modules */
enum d_module {
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/staging/wimax/i2400m/debugfs.c
index 1c640b41ea4c..1c640b41ea4c 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/staging/wimax/i2400m/debugfs.c
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/staging/wimax/i2400m/driver.c
index ecb3fccca603..dc8939ff78c0 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/staging/wimax/i2400m/driver.c
@@ -50,7 +50,7 @@
*/
#include "i2400m.h"
#include <linux/etherdevice.h>
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/suspend.h>
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/staging/wimax/i2400m/fw.c
index 6c9a41bff2e0..6c9a41bff2e0 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/staging/wimax/i2400m/fw.c
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/staging/wimax/i2400m/i2400m-usb.h
index eff4f464a23e..eff4f464a23e 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/staging/wimax/i2400m/i2400m-usb.h
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/staging/wimax/i2400m/i2400m.h
index a3733a6d14f5..de22cc6f2c5c 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/staging/wimax/i2400m/i2400m.h
@@ -156,8 +156,8 @@
#include <linux/completion.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
-#include <net/wimax.h>
-#include <linux/wimax/i2400m.h>
+#include "../net-wimax.h"
+#include "linux-wimax-i2400m.h"
#include <asm/byteorder.h>
enum {
diff --git a/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h b/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h
new file mode 100644
index 000000000000..fd198bc24a3c
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h
@@ -0,0 +1,572 @@
+/*
+ * Intel Wireless WiMax Connection 2400m
+ * Host-Device protocol interface definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * This header defines the data structures and constants used to
+ * communicate with the device.
+ *
+ * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL
+ *
+ * The firmware upload protocol is quite simple and only requires a
+ * handful of commands. See drivers/net/wimax/i2400m/fw.c for more
+ * details.
+ *
+ * The BCF data structure is for the firmware file header.
+ *
+ *
+ * THE DATA / CONTROL PROTOCOL
+ *
+ * This is the normal protocol spoken with the device once the
+ * firmware is uploaded. It transports data payloads and control
+ * messages back and forth.
+ *
+ * It consists of 'messages' that pack one or more payloads each. The
+ * format is described in detail in drivers/net/wimax/i2400m/rx.c and
+ * tx.c.
+ *
+ *
+ * THE L3L4 PROTOCOL
+ *
+ * The term L3L4 refers to Layer 3 (the device) and Layer 4 (the
+ * driver/host software).
+ *
+ * This is the control protocol used by the host to control the i2400m
+ * device (scan, connect, disconnect...). This is sent to / received
+ * as control frames. These frames consist of a header and zero or
+ * more TLVs with information. We call each control frame a "message".
+ *
+ * Each message is composed of:
+ *
+ * HEADER
+ * [TLV0 + PAYLOAD0]
+ * [TLV1 + PAYLOAD1]
+ * [...]
+ * [TLVN + PAYLOADN]
+ *
+ * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are
+ * defined by a TLV structure (Type Length Value) which is a 'header'
+ * (struct i2400m_tlv_hdr) and then the payload.
+ *
+ * All integers are represented as Little Endian.
+ *
+ * - REQUESTS AND EVENTS
+ *
+ * The requests can be classified as follows:
+ *
+ * COMMAND: implies a request from the host to the device requesting
+ * that an action be performed. The device will reply with a
+ * message (with the same type as the command), a status and
+ * no (TLV) payload. Execution of a command might cause
+ * events (of a different type) to be sent later on as the
+ * device's state changes.
+ *
+ * GET/SET: similar to COMMAND, but will not cause other
+ * EVENTs. The reply, in the case of GET, will contain
+ * TLVs with the requested information.
+ *
+ * EVENT: asynchronous messages sent from the device, maybe as a
+ * consequence of previous COMMANDs but disassociated from
+ * them.
+ *
+ * Only one request might be pending at the same time (i.e., don't
+ * parallelize, nor post another GET request before the previous
+ * COMMAND has been acknowledged with its corresponding reply by the
+ * device).
+ *
+ * The different requests and their formats are described below:
+ *
+ * I2400M_MT_* Message types
+ * I2400M_MS_* Message status (for replies, events)
+ * i2400m_tlv_* TLVs
+ *
+ * Data types are named 'struct i2400m_msg_OPNAME', with OPNAME
+ * matching the operation.
+ */
+
+#ifndef __LINUX__WIMAX__I2400M_H__
+#define __LINUX__WIMAX__I2400M_H__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/*
+ * Host Device Interface (HDI) common to all busses
+ */
+
+/* Boot-mode (firmware upload mode) commands */
+
+/* Header for the firmware file */
+struct i2400m_bcf_hdr {
+ __le32 module_type;
+ __le32 header_len;
+ __le32 header_version;
+ __le32 module_id;
+ __le32 module_vendor;
+ __le32 date; /* BCD YYYYMMDD */
+ __le32 size; /* in dwords */
+ __le32 key_size; /* in dwords */
+ __le32 modulus_size; /* in dwords */
+ __le32 exponent_size; /* in dwords */
+ __u8 reserved[88];
+} __attribute__ ((packed));
+
+/* Boot mode opcodes */
+enum i2400m_brh_opcode {
+ I2400M_BRH_READ = 1,
+ I2400M_BRH_WRITE = 2,
+ I2400M_BRH_JUMP = 3,
+ I2400M_BRH_SIGNED_JUMP = 8,
+ I2400M_BRH_HASH_PAYLOAD_ONLY = 9,
+};
+
+/* Boot mode command masks and stuff */
+enum i2400m_brh {
+ I2400M_BRH_SIGNATURE = 0xcbbc0000,
+ I2400M_BRH_SIGNATURE_MASK = 0xffff0000,
+ I2400M_BRH_SIGNATURE_SHIFT = 16,
+ I2400M_BRH_OPCODE_MASK = 0x0000000f,
+ I2400M_BRH_RESPONSE_MASK = 0x000000f0,
+ I2400M_BRH_RESPONSE_SHIFT = 4,
+ I2400M_BRH_DIRECT_ACCESS = 0x00000400,
+ I2400M_BRH_RESPONSE_REQUIRED = 0x00000200,
+ I2400M_BRH_USE_CHECKSUM = 0x00000100,
+};
+
+
+/**
+ * i2400m_bootrom_header - Header for a boot-mode command
+ *
+ * @command: the command descriptor, composed from the enum values above
+ * @target_addr: where in device memory the action should be performed
+ * @data_size: for read/write, amount of data to be read/written
+ * @block_checksum: checksum value (if applicable)
+ * @payload: the beginning of data attached to this header
+ */
+struct i2400m_bootrom_header {
+ __le32 command; /* Compose with enum i2400m_brh */
+ __le32 target_addr;
+ __le32 data_size;
+ __le32 block_checksum;
+ char payload[0];
+} __attribute__ ((packed));
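+
+/*
+ * Illustrative sketch (not part of the original header): composing the
+ * command word of a boot-mode WRITE request from the masks above. The
+ * helper name is made up; the constants are the real ones, and
+ * cpu_to_le32() is assumed from <asm/byteorder.h>.
+ */
+static inline __le32 i2400m_brh_cmd_example(void)
+{
+	u32 cmd = I2400M_BRH_SIGNATURE		/* 0xcbbc in bits 16..31 */
+		| I2400M_BRH_WRITE		/* opcode in bits 0..3 */
+		| I2400M_BRH_RESPONSE_REQUIRED	/* device must ack */
+		| I2400M_BRH_USE_CHECKSUM;	/* payload is checksummed */
+
+	return cpu_to_le32(cmd);
+}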
+
+
+/*
+ * Data / control protocol
+ */
+
+/* Packet types for the host-device interface */
+enum i2400m_pt {
+ I2400M_PT_DATA = 0,
+ I2400M_PT_CTRL,
+ I2400M_PT_TRACE, /* For device debug */
+ I2400M_PT_RESET_WARM, /* device reset */
+ I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */
+ I2400M_PT_EDATA, /* Extended RX data */
+ I2400M_PT_ILLEGAL
+};
+
+
+/*
+ * Payload for a data packet
+ *
+ * This is prefixed to each and every outgoing DATA packet.
+ */
+struct i2400m_pl_data_hdr {
+ __le32 reserved;
+} __attribute__((packed));
+
+
+/*
+ * Payload for an extended data packet
+ *
+ * New in fw v1.4
+ *
+ * @reorder: if this payload has to be reordered or not (and how)
+ * @cs: the type of data in the packet, as defined in 802.16e
+ * T11.13.19.1. Currently only 2 (IPv4 packet) is supported.
+ *
+ * This is prefixed to each and every INCOMING DATA packet.
+ */
+struct i2400m_pl_edata_hdr {
+ __le32 reorder; /* bits defined in i2400m_ro */
+ __u8 cs;
+ __u8 reserved[11];
+} __attribute__((packed));
+
+enum i2400m_cs {
+ I2400M_CS_IPV4_0 = 0,
+ I2400M_CS_IPV4 = 2,
+};
+
+enum i2400m_ro {
+ I2400M_RO_NEEDED = 0x01,
+ I2400M_RO_TYPE = 0x03,
+ I2400M_RO_TYPE_SHIFT = 1,
+ I2400M_RO_CIN = 0x0f,
+ I2400M_RO_CIN_SHIFT = 4,
+ I2400M_RO_FBN = 0x07ff,
+ I2400M_RO_FBN_SHIFT = 8,
+ I2400M_RO_SN = 0x07ff,
+ I2400M_RO_SN_SHIFT = 21,
+};
+
+enum i2400m_ro_type {
+ I2400M_RO_TYPE_RESET = 0,
+ I2400M_RO_TYPE_PACKET,
+ I2400M_RO_TYPE_WS,
+ I2400M_RO_TYPE_PACKET_WS,
+};
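+
+/*
+ * Illustrative sketch (not part of the original header): extracting the
+ * reorder-type field of an extended-data header with the masks above.
+ * The helper name is made up; le32_to_cpu() is assumed from
+ * <asm/byteorder.h>.
+ */
+static inline u32 i2400m_ro_type_example(const struct i2400m_pl_edata_hdr *hdr)
+{
+	u32 reorder = le32_to_cpu(hdr->reorder);
+
+	return (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
+}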
+
+
+/* Misc constants */
+enum {
+ I2400M_PL_ALIGN = 16, /* Payload data size alignment */
+ I2400M_PL_SIZE_MAX = 0x3EFF,
+ I2400M_MAX_PLS_IN_MSG = 60,
+ /* protocol barkers: sync sequences; for notifications they
+ * are sent in groups of four. */
+ I2400M_H2D_PREVIEW_BARKER = 0xcafe900d,
+ I2400M_COLD_RESET_BARKER = 0xc01dc01d,
+ I2400M_WARM_RESET_BARKER = 0x50f750f7,
+ I2400M_NBOOT_BARKER = 0xdeadbeef,
+ I2400M_SBOOT_BARKER = 0x0ff1c1a1,
+ I2400M_SBOOT_BARKER_6050 = 0x80000001,
+ I2400M_ACK_BARKER = 0xfeedbabe,
+ I2400M_D2H_MSG_BARKER = 0xbeefbabe,
+};
+
+
+/*
+ * Hardware payload descriptor
+ *
+ * Bitfields encoded in a struct to enforce typing semantics.
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_pld {
+ __le32 val;
+} __attribute__ ((packed));
+
+#define I2400M_PLD_SIZE_MASK 0x00003fff
+#define I2400M_PLD_TYPE_SHIFT 16
+#define I2400M_PLD_TYPE_MASK 0x000f0000
+
+/*
+ * Header for a TX message or RX message
+ *
+ * @barker: preamble
+ * @size: used for management of the FIFO queue buffer; before
+ * sending, this is converted to be a real preamble. This
+ * indicates the real size of the TX message that starts at this
+ * point. If the highest bit is set, then this message is to be
+ * skipped.
+ * @sequence: sequence number of this message
+ * @offset: offset where the message itself starts -- see the comments
+ * in the file header about message header and payload descriptor
+ * alignment.
+ * @num_pls: number of payloads in this message
+ * @padding: number of padding bytes at the end of the message that
+ * make it block-size aligned
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_msg_hdr {
+ union {
+ __le32 barker;
+ __u32 size; /* same size type as barker!! */
+ };
+ union {
+ __le32 sequence;
+ __u32 offset; /* same size type as barker!! */
+ };
+ __le16 num_pls;
+ __le16 rsv1;
+ __le16 padding;
+ __le16 rsv2;
+ struct i2400m_pld pld[0];
+} __attribute__ ((packed));
+
+
+
+/*
+ * L3/L4 control protocol
+ */
+
+enum {
+ /* Interface version */
+ I2400M_L3L4_VERSION = 0x0100,
+};
+
+/* Message types */
+enum i2400m_mt {
+ I2400M_MT_RESERVED = 0x0000,
+ I2400M_MT_INVALID = 0xffff,
+ I2400M_MT_REPORT_MASK = 0x8000,
+
+ I2400M_MT_GET_SCAN_RESULT = 0x4202,
+ I2400M_MT_SET_SCAN_PARAM = 0x4402,
+ I2400M_MT_CMD_RF_CONTROL = 0x4602,
+ I2400M_MT_CMD_SCAN = 0x4603,
+ I2400M_MT_CMD_CONNECT = 0x4604,
+ I2400M_MT_CMD_DISCONNECT = 0x4605,
+ I2400M_MT_CMD_EXIT_IDLE = 0x4606,
+ I2400M_MT_GET_LM_VERSION = 0x5201,
+ I2400M_MT_GET_DEVICE_INFO = 0x5202,
+ I2400M_MT_GET_LINK_STATUS = 0x5203,
+ I2400M_MT_GET_STATISTICS = 0x5204,
+ I2400M_MT_GET_STATE = 0x5205,
+ I2400M_MT_GET_MEDIA_STATUS = 0x5206,
+ I2400M_MT_SET_INIT_CONFIG = 0x5404,
+ I2400M_MT_CMD_INIT = 0x5601,
+ I2400M_MT_CMD_TERMINATE = 0x5602,
+ I2400M_MT_CMD_MODE_OF_OP = 0x5603,
+ I2400M_MT_CMD_RESET_DEVICE = 0x5604,
+ I2400M_MT_CMD_MONITOR_CONTROL = 0x5605,
+ I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606,
+ I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201,
+ I2400M_MT_SET_EAP_SUCCESS = 0x6402,
+ I2400M_MT_SET_EAP_FAIL = 0x6403,
+ I2400M_MT_SET_EAP_KEY = 0x6404,
+ I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602,
+ I2400M_MT_REPORT_SCAN_RESULT = 0xc002,
+ I2400M_MT_REPORT_STATE = 0xd002,
+ I2400M_MT_REPORT_POWERSAVE_READY = 0xd005,
+ I2400M_MT_REPORT_EAP_REQUEST = 0xe002,
+ I2400M_MT_REPORT_EAP_RESTART = 0xe003,
+ I2400M_MT_REPORT_ALT_ACCEPT = 0xe004,
+ I2400M_MT_REPORT_KEY_REQUEST = 0xe005,
+};
+
+
+/*
+ * Message Ack Status codes
+ *
+ * When a message is replied-to, this status is reported.
+ */
+enum i2400m_ms {
+ I2400M_MS_DONE_OK = 0,
+ I2400M_MS_DONE_IN_PROGRESS = 1,
+ I2400M_MS_INVALID_OP = 2,
+ I2400M_MS_BAD_STATE = 3,
+ I2400M_MS_ILLEGAL_VALUE = 4,
+ I2400M_MS_MISSING_PARAMS = 5,
+ I2400M_MS_VERSION_ERROR = 6,
+ I2400M_MS_ACCESSIBILITY_ERROR = 7,
+ I2400M_MS_BUSY = 8,
+ I2400M_MS_CORRUPTED_TLV = 9,
+ I2400M_MS_UNINITIALIZED = 10,
+ I2400M_MS_UNKNOWN_ERROR = 11,
+ I2400M_MS_PRODUCTION_ERROR = 12,
+ I2400M_MS_NO_RF = 13,
+ I2400M_MS_NOT_READY_FOR_POWERSAVE = 14,
+ I2400M_MS_THERMAL_CRITICAL = 15,
+ I2400M_MS_MAX
+};
+
+
+/**
+ * i2400m_tlv - enumeration of the different types of TLVs
+ *
+ * TLV stands for type-length-value; a TLV is the header for a payload
+ * that can be composed of almost anything. Each payload has a type
+ * assigned and a length.
+ */
+enum i2400m_tlv {
+ I2400M_TLV_L4_MESSAGE_VERSIONS = 129,
+ I2400M_TLV_SYSTEM_STATE = 141,
+ I2400M_TLV_MEDIA_STATUS = 161,
+ I2400M_TLV_RF_OPERATION = 162,
+ I2400M_TLV_RF_STATUS = 163,
+ I2400M_TLV_DEVICE_RESET_TYPE = 132,
+ I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
+ I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
+ I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
+ I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
+};
+
+
+struct i2400m_tlv_hdr {
+ __le16 type;
+ __le16 length; /* payload's */
+ __u8 pl[0];
+} __attribute__((packed));
+
+
+struct i2400m_l3l4_hdr {
+ __le16 type;
+ __le16 length; /* payload's */
+ __le16 version;
+ __le16 resv1;
+ __le16 status;
+ __le16 resv2;
+ struct i2400m_tlv_hdr pl[0];
+} __attribute__((packed));
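+
+/*
+ * Editor's sketch (hypothetical helper, assuming TLVs are packed
+ * back to back with no inter-TLV padding): stepping to the next TLV
+ * in an L3/L4 payload:
+ *
+ *	static const struct i2400m_tlv_hdr *
+ *	i2400m_tlv_next(const struct i2400m_tlv_hdr *tlv)
+ *	{
+ *		return (const struct i2400m_tlv_hdr *)
+ *			(tlv->pl + le16_to_cpu(tlv->length));
+ *	}
+ */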
+
+
+/**
+ * i2400m_system_state - different states of the device
+ */
+enum i2400m_system_state {
+ I2400M_SS_UNINITIALIZED = 1,
+ I2400M_SS_INIT,
+ I2400M_SS_READY,
+ I2400M_SS_SCAN,
+ I2400M_SS_STANDBY,
+ I2400M_SS_CONNECTING,
+ I2400M_SS_WIMAX_CONNECTED,
+ I2400M_SS_DATA_PATH_CONNECTED,
+ I2400M_SS_IDLE,
+ I2400M_SS_DISCONNECTING,
+ I2400M_SS_OUT_OF_ZONE,
+ I2400M_SS_SLEEPACTIVE,
+ I2400M_SS_PRODUCTION,
+ I2400M_SS_CONFIG,
+ I2400M_SS_RF_OFF,
+ I2400M_SS_RF_SHUTDOWN,
+ I2400M_SS_DEVICE_DISCONNECT,
+ I2400M_SS_MAX,
+};
+
+
+/**
+ * i2400m_tlv_system_state - report on the state of the system
+ *
+ * @state: see enum i2400m_system_state
+ */
+struct i2400m_tlv_system_state {
+ struct i2400m_tlv_hdr hdr;
+ __le32 state;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_l4_message_versions {
+ struct i2400m_tlv_hdr hdr;
+ __le16 major;
+ __le16 minor;
+ __le16 branch;
+ __le16 reserved;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_detailed_device_info {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reserved1[400];
+ __u8 mac_address[ETH_ALEN];
+ __u8 reserved2[2];
+} __attribute__((packed));
+
+
+enum i2400m_rf_switch_status {
+ I2400M_RF_SWITCH_ON = 1,
+ I2400M_RF_SWITCH_OFF = 2,
+};
+
+struct i2400m_tlv_rf_switches_status {
+ struct i2400m_tlv_hdr hdr;
+ __u8 sw_rf_switch; /* 1 ON, 2 OFF */
+ __u8 hw_rf_switch; /* 1 ON, 2 OFF */
+ __u8 reserved[2];
+} __attribute__((packed));
+
+
+enum {
+ i2400m_rf_operation_on = 1,
+ i2400m_rf_operation_off = 2
+};
+
+struct i2400m_tlv_rf_operation {
+ struct i2400m_tlv_hdr hdr;
+ __le32 status; /* 1 ON, 2 OFF */
+} __attribute__((packed));
+
+
+enum i2400m_tlv_reset_type {
+ I2400M_RESET_TYPE_COLD = 1,
+ I2400M_RESET_TYPE_WARM
+};
+
+struct i2400m_tlv_device_reset_type {
+ struct i2400m_tlv_hdr hdr;
+ __le32 reset_type;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_config_idle_parameters {
+ struct i2400m_tlv_hdr hdr;
+ __le32 idle_timeout; /* 100 to 300000 ms [5min], in 100 ms increments;
+ * 0 disabled */
+ __le32 idle_paging_interval; /* frames */
+} __attribute__((packed));
+
+
+enum i2400m_media_status {
+ I2400M_MEDIA_STATUS_LINK_UP = 1,
+ I2400M_MEDIA_STATUS_LINK_DOWN,
+ I2400M_MEDIA_STATUS_LINK_RENEW,
+};
+
+struct i2400m_tlv_media_status {
+ struct i2400m_tlv_hdr hdr;
+ __le32 media_status;
+} __attribute__((packed));
+
+
+/* New in v1.4 */
+struct i2400m_tlv_config_idle_timeout {
+ struct i2400m_tlv_hdr hdr;
+ __le32 timeout; /* 100 to 300000 ms [5min], in 100 ms increments;
+ * 0 disabled */
+} __attribute__((packed));
+
+/* New in v1.4 -- for backward compat, will be removed */
+struct i2400m_tlv_config_d2h_data_format {
+ struct i2400m_tlv_hdr hdr;
+ __u8 format; /* 0 old format, 1 enhanced */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+/* New in v1.4 */
+struct i2400m_tlv_config_dl_host_reorder {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reorder; /* 0 disabled, 1 enabled */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+
+#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/staging/wimax/i2400m/netdev.c
index a7fcbceb6e6b..a7fcbceb6e6b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/staging/wimax/i2400m/netdev.c
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
index 5c79f052cad2..fbddf2e18c14 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/staging/wimax/i2400m/op-rfkill.c
@@ -18,7 +18,7 @@
* switch (coming from sysfs, the wimax stack or user space).
*/
#include "i2400m.h"
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/slab.h>
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/staging/wimax/i2400m/rx.c
index c9fb619a9e01..c9fb619a9e01 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/staging/wimax/i2400m/rx.c
diff --git a/drivers/net/wimax/i2400m/sysfs.c b/drivers/staging/wimax/i2400m/sysfs.c
index 895ee265909b..895ee265909b 100644
--- a/drivers/net/wimax/i2400m/sysfs.c
+++ b/drivers/staging/wimax/i2400m/sysfs.c
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/staging/wimax/i2400m/tx.c
index 1255302e251e..1255302e251e 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/staging/wimax/i2400m/tx.c
diff --git a/drivers/net/wimax/i2400m/usb-debug-levels.h b/drivers/staging/wimax/i2400m/usb-debug-levels.h
index b6f7335de765..8fd0111560f6 100644
--- a/drivers/net/wimax/i2400m/usb-debug-levels.h
+++ b/drivers/staging/wimax/i2400m/usb-debug-levels.h
@@ -13,7 +13,7 @@
#define D_MODULENAME i2400m_usb
#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
-#include <linux/wimax/debug.h>
+#include "../linux-wimax-debug.h"
/* List of all the enabled modules */
enum d_module {
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/staging/wimax/i2400m/usb-fw.c
index 27ab233650d5..27ab233650d5 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/staging/wimax/i2400m/usb-fw.c
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/staging/wimax/i2400m/usb-notif.c
index 5d429f816125..5d429f816125 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/staging/wimax/i2400m/usb-notif.c
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/staging/wimax/i2400m/usb-rx.c
index 5b64bda7d9e7..5b64bda7d9e7 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/staging/wimax/i2400m/usb-rx.c
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/staging/wimax/i2400m/usb-tx.c
index 3ba9d70cca1b..3ba9d70cca1b 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/staging/wimax/i2400m/usb-tx.c
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/staging/wimax/i2400m/usb.c
index b684e97ac976..3b84dd7b5567 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/staging/wimax/i2400m/usb.c
@@ -49,7 +49,7 @@
* usb_reset_device()
*/
#include "i2400m-usb.h"
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/staging/wimax/id-table.c b/drivers/staging/wimax/id-table.c
new file mode 100644
index 000000000000..0e6f4aa87bc9
--- /dev/null
+++ b/drivers/staging/wimax/id-table.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Mapping of generic netlink family IDs to net devices
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * We assign a single generic netlink family ID to each device (to
+ * simplify lookup).
+ *
+ * We need a way to map family ID to a wimax_dev pointer.
+ *
+ * The idea is to use a very simple lookup. Using a netlink attribute
+ * with (for example) the interface name implies a heavier search over
+ * all the network devices; seemed kind of a waste given that we know
+ * we are looking for a WiMAX device and that most systems will have
+ * just a single WiMAX adapter.
+ *
+ * We put all the WiMAX devices in the system in a linked list and
+ * match the generic link family ID against the list.
+ *
+ * By using a linked list, the case of a single adapter in the system
+ * becomes (almost) no overhead, while still working for many more. If
+ * it ever goes beyond two, I'll be surprised.
+ */
+#include <linux/device.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include "linux-wimax.h"
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE id_table
+#include "debug-levels.h"
+
+
+static DEFINE_SPINLOCK(wimax_id_table_lock);
+static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table);
+
+
+/*
+ * wimax_id_table_add - add a gennetlink family ID / wimax_dev mapping
+ *
+ * @wimax_dev: WiMAX device descriptor to associate to the Generic
+ * Netlink family ID.
+ *
+ * Add the device to the list used to map generic netlink traffic to
+ * a wimax_dev; lookups are done by interface index (see
+ * wimax_dev_get_by_genl_info()).
+ */
+void wimax_id_table_add(struct wimax_dev *wimax_dev)
+{
+ d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+ spin_lock(&wimax_id_table_lock);
+ list_add(&wimax_dev->id_table_node, &wimax_id_table);
+ spin_unlock(&wimax_id_table_lock);
+ d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+}
+
+
+/*
+ * wimax_dev_get_by_genl_info - look up a wimax_dev from generic netlink info
+ *
+ * Walk the device list looking for the entry whose network interface
+ * index matches @ifindex; when found, take a reference on the
+ * wimax_dev through its net_device.
+ *
+ * When done, the reference should be dropped with
+ * 'dev_put(wimax_dev->net_dev)'.
+ */
+struct wimax_dev *wimax_dev_get_by_genl_info(
+ struct genl_info *info, int ifindex)
+{
+ struct wimax_dev *wimax_dev = NULL;
+
+ d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
+ spin_lock(&wimax_id_table_lock);
+ list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
+ if (wimax_dev->net_dev->ifindex == ifindex) {
+ dev_hold(wimax_dev->net_dev);
+ goto found;
+ }
+ }
+ wimax_dev = NULL;
+ d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
+ ifindex);
+found:
+ spin_unlock(&wimax_id_table_lock);
+ d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
+ info, ifindex, wimax_dev);
+ return wimax_dev;
+}
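+
+/*
+ * Editor's usage sketch (hypothetical caller): the reference taken
+ * above must be dropped through the embedded net_device:
+ *
+ *	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ *	if (wimax_dev == NULL)
+ *		return -ENODEV;
+ *	...
+ *	dev_put(wimax_dev->net_dev);
+ */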
+
+
+/*
+ * wimax_id_table_rm - Remove a gennetlink family ID / wimax_dev mapping
+ *
+ * @wimax_dev: WiMAX device descriptor to remove from the table
+ */
+void wimax_id_table_rm(struct wimax_dev *wimax_dev)
+{
+ spin_lock(&wimax_id_table_lock);
+ list_del_init(&wimax_dev->id_table_node);
+ spin_unlock(&wimax_id_table_lock);
+}
+
+
+/*
+ * Release the gennetlink family id / mapping table
+ *
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
+ */
+void wimax_id_table_release(void)
+{
+ struct wimax_dev *wimax_dev;
+
+#ifndef CONFIG_BUG
+ return;
+#endif
+ spin_lock(&wimax_id_table_lock);
+ list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
+ pr_err("BUG: %s wimax_dev %p ifindex %d not cleared\n",
+ __func__, wimax_dev, wimax_dev->net_dev->ifindex);
+ WARN_ON(1);
+ }
+ spin_unlock(&wimax_id_table_lock);
+}
diff --git a/drivers/staging/wimax/linux-wimax-debug.h b/drivers/staging/wimax/linux-wimax-debug.h
new file mode 100644
index 000000000000..5b5ec405143b
--- /dev/null
+++ b/drivers/staging/wimax/linux-wimax-debug.h
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX
+ * Collection of tools to manage debug operations.
+ *
+ * Copyright (C) 2005-2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * Don't #include this file directly, read on!
+ *
+ * EXECUTING DEBUGGING ACTIONS OR NOT
+ *
+ * The main thing this framework provides is decision power to take a
+ * debug action (like printing a message) if the current debug level
+ * allows it.
+ *
+ * The decision power is at two levels: at compile-time (what does
+ * not make it is compiled out) and at run-time. The run-time
+ * selection is done per-submodule (as they are declared by the user
+ * of the framework).
+ *
+ * A call to d_test(L) (L being the target debug level) returns true
+ * if the action should be taken because the current debug levels
+ * allow it (both compile and run time).
+ *
+ * It follows that a call to d_test() that can be determined to be
+ * always false at compile time will get the code depending on it
+ * compiled out by optimization.
+ *
+ * DEBUG LEVELS
+ *
+ * It is up to the caller to define what each debugging level means.
+ *
+ * Convention sets 0 as "no debug" (so an action marked as debug level 0
+ * will always be taken). The increasing debug levels are used for
+ * increased verbosity.
+ *
+ * USAGE
+ *
+ * Group the code in modules and submodules inside each module [which
+ * in most cases maps to Linux modules and .c files that compose
+ * those].
+ *
+ * For each module, there is:
+ *
+ * - a MODULENAME (single word, legal C identifier)
+ *
+ * - a debug-levels.h header file that declares the list of
+ * submodules and that is included by all .c files that use
+ * the debugging tools. The file name can be anything.
+ *
+ * - some (optional) .c code to manipulate the runtime debug levels
+ * through debugfs.
+ *
+ * The debug-levels.h file would look like:
+ *
+ * #ifndef __debug_levels__h__
+ * #define __debug_levels__h__
+ *
+ * #define D_MODULENAME modulename
+ * #define D_MASTER 10
+ *
+ * #include "linux-wimax-debug.h"
+ *
+ * enum d_module {
+ * D_SUBMODULE_DECLARE(submodule_1),
+ * D_SUBMODULE_DECLARE(submodule_2),
+ * ...
+ * D_SUBMODULE_DECLARE(submodule_N)
+ * };
+ *
+ * #endif
+ *
+ * D_MASTER is the maximum compile-time debug level; any debug actions
+ * above this will be compiled out. D_MODULENAME is the module name (legal C
+ * identifier), which has to be unique for each module (to avoid
+ * namespace collisions during linkage). Note those #defines need to
+ * be done before #including this file.
+ *
+ * We declare N different submodules whose debug level can be
+ * independently controlled during runtime.
+ *
+ * In a .c file of the module (and only in one of them), define the
+ * following code:
+ *
+ * struct d_level D_LEVEL[] = {
+ * D_SUBMODULE_DEFINE(submodule_1),
+ * D_SUBMODULE_DEFINE(submodule_2),
+ * ...
+ * D_SUBMODULE_DEFINE(submodule_N),
+ * };
+ * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+ *
+ * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
+ * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
+ * #defined also in this file.
+ *
+ * To manipulate from user space the levels, create a debugfs dentry
+ * and then register each submodule with:
+ *
+ * d_level_register_debugfs("PREFIX_", submodule_X, parent);
+ *
+ * Where PREFIX_ is a name of your choosing. This will create a debugfs
+ * file with a single numeric value that can be used to tweak it. To
+ * remove the entries, just use debugfs_remove_recursive() on 'parent'.
+ *
+ * NOTE: remember that even if this will show attached to some
+ * particular instance of a device, the settings are *global*.
+ *
+ * On each submodule (for example, .c files), the debug infrastructure
+ * should be included like this:
+ *
+ * #define D_SUBMODULE submodule_x // matches one in debug-levels.h
+ * #include "debug-levels.h"
+ *
+ * after #including all your include files.
+ *
+ * Now you can use the d_*() macros below [d_test(), d_fnstart(),
+ * d_fnend(), d_printf(), d_dump()].
+ *
+ * If their debug level is greater than D_MASTER, they will be
+ * compiled out.
+ *
+ * If their debug level is lower or equal than D_MASTER but greater
+ * than the current debug level of their submodule, they'll be
+ * ignored.
+ *
+ * Otherwise, the action will be performed.
+ */
+#ifndef __debug__h__
+#define __debug__h__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+struct device;
+
+/* Backend stuff */
+
+/*
+ * Debug backend: generate a message header from a 'struct device'
+ *
+ * @head: buffer where to place the header
+ * @head_size: length of @head
+ * @dev: pointer to device used to generate a header from. If NULL,
+ * an empty ("") header is generated.
+ */
+static inline
+void __d_head(char *head, size_t head_size,
+ struct device *dev)
+{
+ if (dev == NULL)
+ head[0] = 0;
+ else if ((unsigned long)dev < 4096) {
+ printk(KERN_ERR "E: Corrupt dev %p\n", dev);
+ WARN_ON(1);
+ } else
+ snprintf(head, head_size, "%s %s: ",
+ dev_driver_string(dev), dev_name(dev));
+}
+
+
+/*
+ * Debug backend: log some message if debugging is enabled
+ *
+ * @l: intended debug level
+ * @tag: tag to prefix the message with
+ * @dev: 'struct device' associated to this message
+ * @f: printf-like format and arguments
+ *
+ * Note this is optimized out if it doesn't pass the compile-time
+ * check; however, it is *always* compiled. This is useful to make
+ * sure the printf-like formats and variables are always checked and
+ * they don't get bit rot if you have all the debugging disabled.
+ */
+#define _d_printf(l, tag, dev, f, a...) \
+do { \
+ char head[64]; \
+ if (!d_test(l)) \
+ break; \
+ __d_head(head, sizeof(head), dev); \
+ printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
+} while (0)
+
+
+/*
+ * CPP syntactic sugar to generate A_B like symbol names when one of
+ * the arguments is a preprocessor #define.
+ */
+#define __D_PASTE__(varname, modulename) varname##_##modulename
+#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
+#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
+
+
+/*
+ * Store a submodule's runtime debug level and name
+ */
+struct d_level {
+ u8 level;
+ const char *name;
+};
+
+
+/*
+ * List of available submodules and their debug levels
+ *
+ * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
+ * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
+ * convenience.
+ *
+ * This array and the size are defined on some .c file that is part of
+ * the current module.
+ */
+#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
+#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
+
+extern struct d_level D_LEVEL[];
+extern size_t D_LEVEL_SIZE;
+
+
+/*
+ * Frontend stuff
+ *
+ *
+ * Stuff you need to declare prior to using the actual "debug" actions
+ * (defined below).
+ */
+
+#ifndef D_MODULENAME
+#error D_MODULENAME is not defined in your debug-levels.h file
+/**
+ * D_MODULENAME - Name of the current module
+ *
+ * #define in your module's debug-levels.h, making sure it is
+ * unique. This has to be a legal C identifier.
+ */
+#define D_MODULENAME undefined_modulename
+#endif
+
+
+#ifndef D_MASTER
+#warning D_MASTER not defined, but debug.h included! [see docs]
+/**
+ * D_MASTER - Compile time maximum debug level
+ *
+ * #define in your debug-levels.h file to the maximum debug level the
+ * runtime code will be allowed to have. This allows you to provide a
+ * main knob.
+ *
+ * Anything above that level will be optimized out of the compile.
+ *
+ * Defaults to zero (no debug code compiled in).
+ *
+ * Maximum one definition per module (at the debug-levels.h file).
+ */
+#define D_MASTER 0
+#endif
+
+#ifndef D_SUBMODULE
+#error D_SUBMODULE not defined, but debug.h included! [see docs]
+/**
+ * D_SUBMODULE - Name of the current submodule
+ *
+ * #define in your submodule .c file before #including debug-levels.h
+ * to the name of the current submodule as previously declared and
+ * defined with D_SUBMODULE_DECLARE() (in your module's
+ * debug-levels.h) and D_SUBMODULE_DEFINE().
+ *
+ * This is used to provide runtime-control over the debug levels.
+ *
+ * Maximum one per .c file! Can be shared among different .c files
+ * (meaning they belong to the same submodule categorization).
+ */
+#define D_SUBMODULE undefined_module
+#endif
+
+
+/**
+ * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ * valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Declare in the module's debug-levels.h header file as:
+ *
+ * enum d_module {
+ * D_SUBMODULE_DECLARE(submodule_1),
+ * D_SUBMODULE_DECLARE(submodule_2),
+ * D_SUBMODULE_DECLARE(submodule_3),
+ * };
+ *
+ * Some corresponding .c file needs to have a matching
+ * D_SUBMODULE_DEFINE().
+ */
+#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
+
+
+/**
+ * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ * valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Use once per module (in some .c file) as:
+ *
+ * static
+ * struct d_level d_level_SUBMODULENAME[] = {
+ * D_SUBMODULE_DEFINE(submodule_1),
+ * D_SUBMODULE_DEFINE(submodule_2),
+ * D_SUBMODULE_DEFINE(submodule_3),
+ * };
+ * size_t d_level_size_SUBMODULENAME = ARRAY_SIZE(d_level_SUBMODULENAME);
+ *
+ * Matching D_SUBMODULE_DECLARE()s have to be present in a
+ * debug-levels.h header file.
+ */
+#define D_SUBMODULE_DEFINE(_name) \
+[__D_SUBMODULE_##_name] = { \
+ .level = 0, \
+ .name = #_name \
+}
+
+
+
+/* The actual "debug" operations */
+
+
+/**
+ * d_test - Returns true if debugging should be enabled
+ *
+ * @l: intended debug level (unsigned)
+ *
+ * If the master debug switch is enabled and the current settings are
+ * higher or equal to the requested level, then debugging
+ * output/actions should be enabled.
+ *
+ * NOTE:
+ *
+ * This needs to be coded so that it can be evaluated in compile
+ * time; this is why the ugly BUG_ON() is placed in there, so the
+ * D_MASTER evaluation compiles all out if it is compile-time false.
+ */
+#define d_test(l) \
+({ \
+ unsigned __l = l; /* type enforcer */ \
+ (D_MASTER) >= __l \
+ && ({ \
+ BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
+ D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \
+ }); \
+})
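+
+/*
+ * Editor's usage sketch (hypothetical helper name): guarding an
+ * expensive debug computation so it is compiled out whenever
+ * D_MASTER makes d_test() compile-time false:
+ *
+ *	if (d_test(2))
+ *		my_dump_ring_state(dev);	// hypothetical
+ */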
+
+
+/**
+ * d_fnstart - log message at function start if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
+
+
+/**
+ * d_fnend - log message at function end if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
+
+
+/**
+ * d_printf - log message if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
+
+
+/**
+ * d_dump - log buffer hex dump if debugging enabled
+ *
+ * @l: intended debug level
+ * @dev: 'struct device' pointer, NULL if none (for context)
+ * @ptr: pointer to the buffer to dump
+ * @size: number of bytes to dump
+ */
+#define d_dump(l, dev, ptr, size) \
+do { \
+ char head[64]; \
+ if (!d_test(l)) \
+ break; \
+ __d_head(head, sizeof(head), dev); \
+ print_hex_dump(KERN_ERR, head, 0, 16, 1, \
+ ((void *) ptr), (size), 0); \
+} while (0)
+
+
+/**
+ * Export a submodule's debug level over debugfs as PREFIXSUBMODULE
+ *
+ * @prefix: string to prefix the name with
+ * @name: name of submodule (not a string, just the name)
+ * @parent: debugfs parent dentry
+ *
+ * For removing, just use debugfs_remove_recursive() on the parent.
+ */
+#define d_level_register_debugfs(prefix, name, parent) \
+({ \
+ debugfs_create_u8( \
+ prefix #name, 0600, parent, \
+ &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \
+})
+
+
+static inline
+void d_submodule_set(struct d_level *d_level, size_t d_level_size,
+ const char *submodule, u8 level, const char *tag)
+{
+ struct d_level *itr, *top;
+ int index = -1;
+
+ for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
+ index++;
+ if (itr->name == NULL) {
+ printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
+ tag, itr, index);
+ continue;
+ }
+ if (!strcmp(itr->name, submodule)) {
+ itr->level = level;
+ return;
+ }
+ }
+ printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
+}
+
+
+/**
+ * d_parse_params - Parse a string with debug parameters from the
+ * command line
+ *
+ * @d_level: level structure (D_LEVEL)
+ * @d_level_size: number of items in the level structure
+ * (D_LEVEL_SIZE).
+ * @_params: string with the parameters; this is a space (not tab!)
+ * separated list of NAME:VALUE, where value is the debug level
+ * and NAME is the name of the submodule.
+ * @tag: string for error messages (example: MODULE.ARGNAME).
+ */
+static inline
+void d_parse_params(struct d_level *d_level, size_t d_level_size,
+ const char *_params, const char *tag)
+{
+ char submodule[130], *params, *params_orig, *token, *colon;
+ unsigned level, tokens;
+
+ if (_params == NULL)
+ return;
+ params_orig = kstrdup(_params, GFP_KERNEL);
+ params = params_orig;
+ while (1) {
+ token = strsep(&params, " ");
+ if (token == NULL)
+ break;
+ if (*token == '\0') /* eat joint spaces */
+ continue;
+ /* kernel's sscanf %s eats until whitespace, so we
+ * replace : by \n so it doesn't get eaten later by
+ * strsep */
+ colon = strchr(token, ':');
+ if (colon != NULL)
+ *colon = '\n';
+ tokens = sscanf(token, "%s\n%u", submodule, &level);
+ if (colon != NULL)
+ *colon = ':'; /* set back, for error messages */
+ if (tokens == 2)
+ d_submodule_set(d_level, d_level_size,
+ submodule, level, tag);
+ else
+ printk(KERN_ERR "%s: can't parse '%s' as a "
+ "SUBMODULE:LEVEL (%d tokens)\n",
+ tag, token, tokens);
+ }
+ kfree(params_orig);
+}
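+
+/*
+ * Editor's usage sketch (hypothetical names): feeding a module
+ * parameter such as debug="op_msg:2 id_table:1" to d_parse_params()
+ * at module init time:
+ *
+ *	static char *debug_params;
+ *	module_param_named(debug, debug_params, charp, 0644);
+ *	...
+ *	d_parse_params(D_LEVEL, D_LEVEL_SIZE, debug_params, "wimax.debug");
+ */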
+
+#endif /* #ifndef __debug__h__ */
diff --git a/drivers/staging/wimax/linux-wimax.h b/drivers/staging/wimax/linux-wimax.h
new file mode 100644
index 000000000000..9f6b77af2f6d
--- /dev/null
+++ b/drivers/staging/wimax/linux-wimax.h
@@ -0,0 +1,239 @@
+/*
+ * Linux WiMAX
+ * API for user space
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * This file declares the user/kernel protocol that is spoken over
+ * Generic Netlink, as well as any type declaration that is to be used
+ * by kernel and user space.
+ *
+ * It is intended for user space to clone it verbatim to use it as a
+ * primary reference for definitions.
+ *
+ * Stuff intended for kernel usage as well as full protocol and stack
+ * documentation is rooted in include/net/wimax.h.
+ */
+
+#ifndef __LINUX__WIMAX_H__
+#define __LINUX__WIMAX_H__
+
+#include <linux/types.h>
+
+enum {
+ /**
+ * Version of the interface (unsigned decimal, MMm, max 25.5)
+ * M - Major: change if removing or modifying an existing call.
+ * m - minor: change when adding a new call
+ */
+ WIMAX_GNL_VERSION = 01,
+ /* Generic NetLink attributes */
+ WIMAX_GNL_ATTR_INVALID = 0x00,
+ WIMAX_GNL_ATTR_MAX = 10,
+};
+
+
+/*
+ * Generic NetLink operations
+ *
+ * Most of these map to an API call; _OP_ stands for operation, _RP_
+ * for reply and _RE_ for report (aka: signal).
+ */
+enum {
+ WIMAX_GNL_OP_MSG_FROM_USER, /* User to kernel message */
+ WIMAX_GNL_OP_MSG_TO_USER, /* Kernel to user message */
+ WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */
+ WIMAX_GNL_OP_RESET, /* Run wimax_reset() */
+ WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */
+ WIMAX_GNL_OP_STATE_GET, /* Request for current state */
+};
+
+
+/* Message from user / to user */
+enum {
+ WIMAX_GNL_MSG_IFIDX = 1,
+ WIMAX_GNL_MSG_PIPE_NAME,
+ WIMAX_GNL_MSG_DATA,
+};
+
+
+/*
+ * wimax_rfkill()
+ *
+ * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's
+ * switch state (DISABLED/ENABLED).
+ */
+enum wimax_rf_state {
+ WIMAX_RF_OFF = 0, /* Radio is off, rfkill on/enabled */
+ WIMAX_RF_ON = 1, /* Radio is on, rfkill off/disabled */
+ WIMAX_RF_QUERY = 2,
+};
+
+/* Attributes */
+enum {
+ WIMAX_GNL_RFKILL_IFIDX = 1,
+ WIMAX_GNL_RFKILL_STATE,
+};
+
+
+/* Attributes for wimax_reset() */
+enum {
+ WIMAX_GNL_RESET_IFIDX = 1,
+};
+
+/* Attributes for wimax_state_get() */
+enum {
+ WIMAX_GNL_STGET_IFIDX = 1,
+};
+
+/*
+ * Attributes for the Report State Change
+ *
+ * For now we just have the old and new states; new attributes might
+ * be added later on.
+ */
+enum {
+ WIMAX_GNL_STCH_IFIDX = 1,
+ WIMAX_GNL_STCH_STATE_OLD,
+ WIMAX_GNL_STCH_STATE_NEW,
+};
+
+
+/**
+ * enum wimax_st - The different states of a WiMAX device
+ * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed,
+ * but wimax_dev_add() has not yet been called. There is no state.
+ *
+ * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and
+ * networking stacks, but it is not initialized (normally that is
+ * done with 'ifconfig DEV up' [or equivalent], which can upload
+ * firmware and enable communications with the device).
+ * In this state, the device is powered down and uses as little
+ * power as possible.
+ * This state is the default after a call to wimax_dev_add(). It
+ * is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED
+ * or %WIMAX_ST_RADIO_OFF in _probe() after the call to
+ * wimax_dev_add().
+ * It is recommended that the driver leaves this state when
+ * calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV
+ * down'.
+ *
+ * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API
+ * operations are allowed to proceed except the ones needed to
+ * complete the device clean up process.
+ *
+ * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device
+ * is setup, but the device still requires some configuration
+ * before being operational.
+ * Some WiMAX API calls might work.
+ *
+ * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (whether
+ * by hardware or software switches).
+ * It is recommended to always leave the device in this state
+ * after initialization.
+ *
+ * @WIMAX_ST_READY: The device is fully up and radio is on.
+ *
+ * @WIMAX_ST_SCANNING: [optional] The device has been instructed to
+ * scan. In this state, the device cannot be actively connected to
+ * a network.
+ *
+ * @WIMAX_ST_CONNECTING: The device is connecting to a network. This
+ * state exists because in some devices, the connect process can
+ * include a number of negotiations between user space, kernel
+ * space and the device. User space needs to know what the device
+ * is doing. If the connect sequence in a device is atomic and
+ * fast, the device can transition directly to CONNECTED.
+ *
+ * @WIMAX_ST_CONNECTED: The device is connected to a network.
+ *
+ * @__WIMAX_ST_INVALID: This is an invalid state used to mark the
+ * maximum numeric value of states.
+ *
+ * Description:
+ *
+ * Transitions from one state to another one are atomic and can only
+ * be caused in kernel space with wimax_state_change(). To read the
+ * state, use wimax_state_get().
+ *
+ * States starting with __ are internal and shall not be used or
+ * referred to by drivers or userspace. They look ugly, but that's the
+ * point -- if any use is made non-internal to the stack, it is easier
+ * to catch on review.
+ *
+ * All API operations [with well defined exceptions] will take the
+ * device mutex before starting and then check the state. If the state
+ * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or
+ * %__WIMAX_ST_QUIESCING, it will drop the lock and quit with
+ * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN.
+ *
+ * The order of the definitions is important, so we can do numerical
+ * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready
+ * to operate).
+ */
+/*
+ * The allowed state transitions are described in the table below
+ * (states in rows can go to states in columns where there is an X):
+ *
+ * UNINI RADIO READY SCAN CONNEC CONNEC
+ * NULL DOWN QUIESCING TIALIZED OFF NING TING TED
+ * NULL - x
+ * DOWN - x x x
+ * QUIESCING x -
+ * UNINITIALIZED x - x
+ * RADIO_OFF x - x
+ * READY x x - x x x
+ * SCANNING x x x - x x
+ * CONNECTING x x x x - x
+ * CONNECTED x x x -
+ *
+ * This table is not available in kernel-doc because the formatting messes it up.
+ */
+enum wimax_st {
+ __WIMAX_ST_NULL = 0,
+ WIMAX_ST_DOWN,
+ __WIMAX_ST_QUIESCING,
+ WIMAX_ST_UNINITIALIZED,
+ WIMAX_ST_RADIO_OFF,
+ WIMAX_ST_READY,
+ WIMAX_ST_SCANNING,
+ WIMAX_ST_CONNECTING,
+ WIMAX_ST_CONNECTED,
+ __WIMAX_ST_INVALID /* Always keep last */
+};
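+
+/*
+ * Editor's example (hedged, based on the ordering note above): the
+ * numeric ordering allows readiness checks by comparison; a
+ * hypothetical kernel-side caller might do:
+ *
+ *	if (wimax_state_get(wimax_dev) < WIMAX_ST_RADIO_OFF)
+ *		return -ENOMEDIUM;	// device not ready to operate
+ */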
+
+
+#endif /* #ifndef __LINUX__WIMAX_H__ */
diff --git a/drivers/staging/wimax/net-wimax.h b/drivers/staging/wimax/net-wimax.h
new file mode 100644
index 000000000000..f578e345e2bd
--- /dev/null
+++ b/drivers/staging/wimax/net-wimax.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX
+ * Kernel space API for accessing WiMAX devices
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * The WiMAX stack provides an API for controlling and managing the
+ * system's WiMAX devices. This API affects the control plane; the
+ * data plane is accessed via the network stack (netdev).
+ *
+ * Parts of the WiMAX stack API and notifications are exported to
+ * user space via Generic Netlink. In user space, libwimax (part of
+ * the wimax-tools package) provides a shim layer for accessing those
+ * calls.
+ *
+ * The API is standardized for all WiMAX devices and different drivers
+ * implement the backend support for it. However, device-specific
+ * messaging pipes are provided that can be used to issue commands and
+ * receive notifications in free form.
+ *
+ * Currently the messaging pipes are the only means of control as it
+ * is not known (due to the lack of more devices in the market) what
+ * will be a good abstraction layer. Expect this to change as more
+ * devices show in the market. This API is designed to be growable in
+ * order to address this problem.
+ *
+ * USAGE
+ *
+ * Embed a `struct wimax_dev` at the beginning of the device's
+ * private structure, initialize and register it. For details, see
+ * `struct wimax_dev`'s documentation.
+ *
+ * Once this is done, wimax-tools's libwimaxll can be used to
+ * communicate with the driver from user space. Your user space
+ * application does not have to use libwimaxll and can talk
+ * the generic netlink protocol directly if desired.
+ *
+ * Remember this is a very low level API that does not by itself
+ * provide all WiMAX features. Other daemons and services running in
+ * user space are the expected clients of it. They offer a higher
+ * level API that applications should use (an example of this is
+ * Intel's WiMAX Network Service for the i2400m).
+ *
+ * DESIGN
+ *
+ * Although not set in stone, this very basic interface is
+ * mostly complete. Remember this is meant to grow as new common
+ * operations are decided upon. New operations will be added to the
+ * interface, intent being on keeping backwards compatibility as much
+ * as possible.
+ *
+ * This layer implements a set of calls to control a WiMAX device,
+ * exposing a frontend to the rest of the kernel and user space (via
+ * generic netlink) and a backend implementation in the driver through
+ * function pointers.
+ *
+ * WiMAX devices have a state, and a kernel-only API allows the
+ * drivers to manipulate that state. State transitions are atomic, and
+ * only some of them are allowed (see `enum wimax_st`).
+ *
+ * Most API calls will set the state automatically; in most cases
+ * drivers have to only report state changes due to external
+ * conditions.
+ *
+ * All API operations are 'atomic', serialized through a mutex in the
+ * `struct wimax_dev`.
+ *
+ * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK
+ *
+ * The API is exported to user space using generic netlink (other
+ * methods can be added as needed).
+ *
+ * There is a Generic Netlink Family named "WiMAX", where interfaces
+ * supporting the WiMAX interface receive commands and broadcast their
+ * signals over a multicast group named "msg".
+ *
+ * Mapping to the source/destination interface is done by an interface
+ * index attribute.
+ *
+ * For user-to-kernel traffic (commands) we use a function call
+ * marshalling mechanism, where a message X with attributes A, B, C
+ * sent from user space to kernel space means executing the WiMAX API
+ * call wimax_X(A, B, C), sending the results back as a message.
+ *
+ * Kernel-to-user (notifications or signals) communication is sent
+ * over multicast groups. This allows multiple applications
+ * to monitor them.
+ *
+ * Each command/signal gets assigned its own attribute policy. This
+ * way the validator will verify that the attributes in a message are
+ * only the ones allowed for that command/signal. Think of an
+ * attribute as mapping to a type+argument-name for each command/signal.
+ *
+ * If we had a single policy for *all* commands/signals, after running
+ * the validator we'd have to check "does this attribute belong in
+ * here"? for each one. It can be done manually, but it's just easier
+ * to have the validator do that job with multiple policies. It also
+ * makes it easier to later expand each command/signal signature
+ * without affecting the others, and keeps the namespace more or less
+ * sane.
+ *
+ * No state information is maintained in the kernel for each user
+ * space connection (the connection is stateless).
+ *
+ * TESTING FOR THE INTERFACE AND VERSIONING
+ *
+ * If network interface X is a WiMAX device, there will be a Generic
+ * Netlink family named "WiMAX X" and the device will present a
+ * "wimax" directory in it's network sysfs directory
+ * (/sys/class/net/DEVICE/wimax) [used by HAL].
+ *
+ * The absence of either of these means the device does not support
+ * this WiMAX API.
+ *
+ * By querying the generic netlink controller, versioning information
+ * and the multicast groups available can be found. Applications using
+ * the interface can either rely on that or use the generic netlink
+ * controller to figure out which generic netlink commands/signals are
+ * supported.
+ *
+ * NOTE: this versioning is a last resort to avoid hard
+ * incompatibilities. It is the intention of the design of this
+ * stack not to introduce backward incompatible changes.
+ *
+ * The version code has to fit in one byte (restrictions imposed by
+ * generic netlink); we use `version / 10` for the major version and
+ * `version % 10` for the minor. This gives ten minors (0-9) for each
+ * major, and majors up to 25.
+ *
+ * The version change protocol is as follows:
+ *
+ * - Major versions: needs to be increased if an existing message/API
+ * call is changed or removed. Doesn't need to be changed if a new
+ * message is added.
+ *
+ * - Minor version: needs to be increased if new messages/API calls are
+ * being added or some other consideration that doesn't impact the
+ * user-kernel interface too much (like some kind of bug fix) and
+ * that is kind of left up in the air to common sense.
+ *
+ * User space code should not try to work if the major version it was
+ * compiled for differs from what the kernel offers. As well, if the
+ * minor version of the kernel interface is lower than the one user
+ * space is expecting (the one it was compiled for), the kernel
+ * might be missing API calls; user space shall be ready to handle
+ * said condition. Use the generic netlink controller operations to
+ * find which ones are supported and which not.
+ *
+ * libwimaxll:wimaxll_open() takes care of checking versions.
+ *
+ * THE OPERATIONS:
+ *
+ * Each operation is defined in its own file (drivers/net/wimax/op-*.c)
+ * for clarity. The parts needed for an operation are:
+ *
+ * - a function pointer in `struct wimax_dev`: optional, as the
+ * operation might be implemented by the stack and not by the
+ * driver.
+ *
+ * All function pointers are named wimax_dev->op_*(), and drivers
+ * must implement them except where noted otherwise.
+ *
+ * - When exported to user space, a `struct nla_policy` to define the
+ * attributes of the generic netlink command and a `struct genl_ops`
+ * to define the operation.
+ *
+ * All the declarations for the operation codes (WIMAX_GNL_OP_<NAME>)
+ * and generic netlink attributes (WIMAX_GNL_<NAME>_*) are declared in
+ * include/linux/wimax.h; this file is intended to be cloned by user
+ * space to gain access to those declarations.
+ *
+ * A few caveats to remember:
+ *
+ * - Need to define attribute numbers starting at 1; otherwise it
+ * fails.
+ *
+ * - the `struct genl_family` requires a maximum attribute id; when
+ * defining the `struct nla_policy` for each message, it has to have
+ * an array size of WIMAX_GNL_ATTR_MAX+1.
+ *
+ * The op_*() function pointers will not be called if the wimax_dev is
+ * in a state <= %WIMAX_ST_UNINITIALIZED. The exception is:
+ *
+ * - op_reset: can be called at any time after wimax_dev_add() has
+ * been called.
+ *
+ * THE PIPE INTERFACE:
+ *
+ * This interface is kept intentionally simple. The driver can send
+ * and receive free-form messages to/from user space through a
+ * pipe. See drivers/net/wimax/op-msg.c for details.
+ *
+ * The kernel-to-user messages are sent with
+ * wimax_msg(). User-to-kernel messages are delivered via
+ * wimax_dev->op_msg_from_user().
+ *
+ * RFKILL:
+ *
+ * RFKILL support is built into the wimax_dev layer; the driver just
+ * needs to call wimax_report_rfkill_{hw,sw}() to inform of changes in
+ * the hardware or software RF kill switches. When the stack wants to
+ * turn the radio off, it will call wimax_dev->op_rfkill_sw_toggle(),
+ * which the driver implements.
+ *
+ * User space can set the software RF Kill switch by calling
+ * wimax_rfkill().
+ *
+ * The code for now only supports devices that don't require polling;
+ * If the device needs to be polled, create a self-rearming delayed
+ * work struct for polling or look into adding polled support to the
+ * WiMAX stack.
+ *
+ * When initializing the hardware (_probe), after calling
+ * wimax_dev_add(), query the device for its RF kill switch status
+ * and feed it back to the WiMAX stack using
+ * wimax_report_rfkill_{hw,sw}(). If any switch is missing, always
+ * report it as ON.
+ *
+ * NOTE: the wimax stack uses an inverted terminology to that of the
+ * RFKILL subsystem:
+ *
+ * - ON: radio is ON, RFKILL is DISABLED or OFF.
+ * - OFF: radio is OFF, RFKILL is ENABLED or ON.
+ *
+ * MISCELLANEOUS OPS:
+ *
+ * wimax_reset() can be used to reset the device to power on state; by
+ * default it issues a warm reset that maintains the same device
+ * node. If that is not possible, it falls back to a cold reset
+ * (device reconnect). The driver implements the backend to this
+ * through wimax_dev->op_reset().
+ */
+
+#ifndef __NET__WIMAX_H__
+#define __NET__WIMAX_H__
+
+#include "linux-wimax.h"
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+
+struct net_device;
+struct genl_info;
+struct wimax_dev;
+
+/**
+ * struct wimax_dev - Generic WiMAX device
+ *
+ * @net_dev: [fill] Pointer to the &struct net_device this WiMAX
+ * device implements.
+ *
+ * @op_msg_from_user: [fill] Driver-specific operation to
+ * handle a raw message from user space to the driver. The
+ * driver can send messages to user space using
+ * wimax_msg().
+ *
+ * @op_rfkill_sw_toggle: [fill] Driver-specific operation to act on
+ * userspace (or any other agent) requesting the WiMAX device to
+ * change the RF Kill software switch (WIMAX_RF_ON or
+ * WIMAX_RF_OFF).
+ * If such hardware support is not present, it is assumed the
+ * radio cannot be switched off and it is always on (and the stack
+ * will error out when trying to switch it off). In such case,
+ * this function pointer can be left as NULL.
+ *
+ * @op_reset: [fill] Driver specific operation to reset the
+ * device.
+ * This operation should always attempt first a warm reset that
+ * does not disconnect the device from the bus and return 0.
+ * If that fails, it should resort to some sort of cold or bus
+ * reset (even if it implies a bus disconnection and device
+ * disappearance). In that case, -ENODEV should be returned to
+ * indicate the device is gone.
+ * This operation has to be synchronous, and return only when the
+ * reset is complete. In case of having had to resort to bus/cold
+ * reset implying a device disconnection, the call is allowed to
+ * return immediately.
+ * NOTE: wimax_dev->mutex is NOT locked when this op is being
+ * called; however, wimax_dev->mutex_reset IS locked to ensure
+ * serialization of calls to wimax_reset().
+ * See wimax_reset()'s documentation.
+ *
+ * @name: [fill] A way to identify this device. We need to register a
+ * name with many subsystems (rfkill, workqueue creation, etc).
+ * We can't use the network device name as that
+ * might change and in some instances we don't know it yet (until
+ * we call register_netdev()). So we generate a unique one
+ * using the driver name and device bus id, place it here and use
+ * it across the board. Recommended naming:
+ * DRIVERNAME-BUSNAME:BUSID (dev->bus->name, dev->bus_id).
+ *
+ * @id_table_node: [private] link to the list of wimax devices kept by
+ * id-table.c. Protected by its own spinlock.
+ *
+ * @mutex: [private] Serializes all concurrent access and execution of
+ * operations.
+ *
+ * @mutex_reset: [private] Serializes reset operations. Needs to be a
+ * different mutex because as part of the reset operation, the
+ * driver has to call back into the stack to do things such as
+ * state change, that require wimax_dev->mutex.
+ *
+ * @state: [private] Current state of the WiMAX device.
+ *
+ * @rfkill: [private] integration into the RF-Kill infrastructure.
+ *
+ * @rf_sw: [private] State of the software radio switch (OFF/ON)
+ *
+ * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
+ *
+ * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
+ * shows up in the debugfs root as wimax\:DEVICENAME.
+ *
+ * Description:
+ * This structure defines a common interface to access all WiMAX
+ * devices from different vendors and provides a common API as well as
+ * a free-form device-specific messaging channel.
+ *
+ * Usage:
+ * 1. Embed a &struct wimax_dev at *the beginning* of the network
+ * device structure so that netdev_priv() points to it.
+ *
+ * 2. memset() it to zero
+ *
+ * 3. Initialize with wimax_dev_init(). This will leave the WiMAX
+ * device in the %__WIMAX_ST_NULL state.
+ *
+ * 4. Fill all the fields marked with [fill]; once called
+ * wimax_dev_add(), those fields CANNOT be modified.
+ *
+ * 5. Call wimax_dev_add() *after* registering the network
+ * device. This will leave the WiMAX device in the %WIMAX_ST_DOWN
+ * state.
+ * Protect the driver's net_device->open() against succeeding if
+ * the wimax device state is lower than %WIMAX_ST_DOWN.
+ *
+ * 6. Select when the device is going to be turned on/initialized;
+ * for example, it could be initialized on 'ifconfig up' (when the
+ * netdev op 'open()' is called on the driver).
+ *
+ * When the device is initialized (at `ifconfig up` time, or right
+ * after calling wimax_dev_add() from _probe()), make sure the
+ * following steps are taken:
+ *
+ * a. Move the device to %WIMAX_ST_UNINITIALIZED. This is needed so
+ * some API calls that shouldn't work until the device is ready
+ * can be blocked.
+ *
+ * b. Initialize the device. Make sure to turn the SW radio switch
+ * off and move the device to state %WIMAX_ST_RADIO_OFF when
+ * done. When just initialized, a device should be left in RADIO
+ * OFF state until user space decides to turn it on.
+ *
+ * c. Query the device for the state of the hardware rfkill switch
+ * and call wimax_report_rfkill_hw() and wimax_report_rfkill_sw()
+ * as needed. See below.
+ *
+ * wimax_dev_rm() undoes wimax_dev_add(); call it before unregistering
+ * the network device. Once wimax_dev_add() is called, the driver can
+ * get called on the wimax_dev->op_*() function pointers.
+ *
+ * CONCURRENCY:
+ *
+ * The stack provides a mutex for each device that will disallow API
+ * calls happening concurrently; thus, op calls into the driver
+ * through the wimax_dev->op*() function pointers will always be
+ * serialized and *never* concurrent.
+ *
+ * For locking, wimax_dev->mutex is taken; (most) operations in
+ * the API have to check for wimax_dev_is_ready() to return 0 before
+ * continuing (this is done internally).
+ *
+ * REFERENCE COUNTING:
+ *
+ * The WiMAX device is reference counted by the associated network
+ * device. The only operation that can be used to reference the device
+ * is wimax_dev_get_by_genl_info(), and the reference it acquires has
+ * to be released with dev_put(wimax_dev->net_dev).
+ *
+ * RFKILL:
+ *
+ * At startup, both HW and SW radio switches are assumed to be off.
+ *
+ * At initialization time [after calling wimax_dev_add()], have the
+ * driver query the device for the status of the software and hardware
+ * RF kill switches and call wimax_report_rfkill_hw() and
+ * wimax_rfkill_report_sw() to indicate their state. If any is
+ * missing, just call it to indicate it is ON (radio always on).
+ *
+ * Whenever the driver detects a change in the state of the RF kill
+ * switches, it should call wimax_report_rfkill_hw() or
+ * wimax_report_rfkill_sw() to report it to the stack.
+ */
+struct wimax_dev {
+ struct net_device *net_dev;
+ struct list_head id_table_node;
+ struct mutex mutex; /* Protects all members and API calls */
+ struct mutex mutex_reset;
+ enum wimax_st state;
+
+ int (*op_msg_from_user)(struct wimax_dev *wimax_dev,
+ const char *,
+ const void *, size_t,
+ const struct genl_info *info);
+ int (*op_rfkill_sw_toggle)(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state);
+ int (*op_reset)(struct wimax_dev *wimax_dev);
+
+ struct rfkill *rfkill;
+ unsigned int rf_hw;
+ unsigned int rf_sw;
+ char name[32];
+
+ struct dentry *debugfs_dentry;
+};
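+
+/*
+ * Editor's sketch of the registration sequence described above (all
+ * driver-side names are hypothetical):
+ *
+ *	struct my_priv {
+ *		struct wimax_dev wimax_dev;	// must be first
+ *		...
+ *	};
+ *
+ *	wimax_dev_init(&priv->wimax_dev);
+ *	priv->wimax_dev.op_msg_from_user = my_op_msg_from_user;
+ *	priv->wimax_dev.op_reset = my_op_reset;
+ *	result = register_netdev(net_dev);
+ *	...
+ *	result = wimax_dev_add(&priv->wimax_dev, net_dev);
+ */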
+
+
+
+/*
+ * WiMAX stack public API for device drivers
+ * -----------------------------------------
+ *
+ * These functions are not exported to user space.
+ */
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
+
+static inline
+struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
+{
+ return netdev_priv(net_dev);
+}
+
+static inline
+struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
+{
+ return wimax_dev->net_dev->dev.parent;
+}
+
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
+
+/*
+ * Radio Switch state reporting.
+ *
+ * enum wimax_rf_state is declared in linux/wimax.h so the exports
+ * to user space can use it.
+ */
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+
+
+/*
+ * Free-form messaging to/from user space
+ *
+ * Sending a message:
+ *
+ * wimax_msg(wimax_dev, pipe_name, buf, buf_size, GFP_KERNEL);
+ *
+ * Broken up:
+ *
+ * skb = wimax_msg_alloc(wimax_dev, pipe_name, buf_size, GFP_KERNEL);
+ * ...fill up skb...
+ * wimax_msg_send(wimax_dev, pipe_name, skb);
+ *
+ * Be sure not to modify skb->data in the middle (ie: don't use
+ * skb_push()/skb_pull()/skb_reserve() on the skb).
+ *
+ * "pipe_name" is any string, that can be interpreted as the name of
+ * the pipe or recipient; the interpretation of it is driver
+ * specific, so the recipient can multiplex it as wished. It can be
+ * NULL, it won't be used - an example is using a "diagnostics" tag to
+ * send diagnostics information that a device-specific diagnostics
+ * tool would be interested in.
+ */
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+ size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
+
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
+
+
+/*
+ * WiMAX stack user space API
+ * --------------------------
+ *
+ * This API is what gets exported to user space for general
+ * operations. As well, they can be called from within the kernel,
+ * (with a properly referenced `struct wimax_dev`).
+ *
+ * Properly referenced means: the 'struct net_device' that embeds the
+ * device's control structure and (as such) the 'struct wimax_dev' is
+ * referenced by the caller.
+ */
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
+
+#endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/drivers/staging/wimax/op-msg.c b/drivers/staging/wimax/op-msg.c
new file mode 100644
index 000000000000..e20ac7d84e82
--- /dev/null
+++ b/drivers/staging/wimax/op-msg.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Generic messaging interface between userspace and driver/device
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements a direct communication channel between user space and
+ * the driver/device, by which free form messages can be sent back and
+ * forth.
+ *
+ * This is intended for device-specific features, vendor quirks, etc.
+ *
+ * See include/net/wimax.h
+ *
+ * GENERIC NETLINK ENCODING AND CAPACITY
+ *
+ * A destination "pipe name" is added to each message; it is up to the
+ * drivers to assign or use those names (if using them at all).
+ *
+ * Messages are encoded as a binary netlink attribute using nla_put()
+ * using type NLA_UNSPEC (as some versions of libnl still in
+ * deployment don't yet understand NLA_BINARY).
+ *
+ * The maximum capacity of this transport is PAGESIZE per message (so
+ * the actual payload will be a bit smaller depending on the
+ * netlink/generic netlink attributes and headers).
+ *
+ * RECEPTION OF MESSAGES
+ *
+ * When a message is received from user space, it is passed verbatim
+ * to the driver calling wimax_dev->op_msg_from_user(). The return
+ * value from this function is passed back to user space as an ack
+ * over the generic netlink protocol.
+ *
+ * The stack doesn't do any processing or interpretation of these
+ * messages.
+ *
+ * SENDING MESSAGES
+ *
+ * Messages can be sent with wimax_msg().
+ *
+ * If the message delivery needs to happen on a different context to
+ * that of its creation, wimax_msg_alloc() can be used to get a
+ * pointer to the message that can be delivered later on with
+ * wimax_msg_send().
+ *
+ * ROADMAP
+ *
+ * wimax_gnl_doit_msg_from_user() Process a message from user space
+ * wimax_dev_get_by_genl_info()
+ * wimax_dev->op_msg_from_user() Delivery of message to the driver
+ *
+ * wimax_msg() Send a message to user space
+ * wimax_msg_alloc()
+ * wimax_msg_send()
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include <linux/export.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE op_msg
+#include "debug-levels.h"
+
+
+/**
+ * wimax_msg_alloc - Create a new skb for sending a message to userspace
+ *
+ * @wimax_dev: WiMAX device descriptor
+ * @pipe_name: "named pipe" the message will be sent to
+ * @msg: pointer to the message data to send
+ * @size: size of the message to send (in bytes), including the header.
+ * @gfp_flags: flags for memory allocation.
+ *
+ * Returns: pointer to the new skb if ok; on error, an ERR_PTR()
+ * encoding a negative errno code.
+ *
+ * Description:
+ *
+ * Allocates an skb that will contain the message to send to user
+ * space over the messaging pipe and initializes it, copying the
+ * payload.
+ *
+ * Once this call is done, you can deliver it with
+ * wimax_msg_send().
+ *
+ * IMPORTANT:
+ *
+ * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
+ * wimax_msg_send() depends on skb->data being placed at the
+ * beginning of the user message.
+ *
+ * Unlike other WiMAX stack calls, this call can be used way early,
+ * even before wimax_dev_add() is called, as long as the
+ * wimax_dev->net_dev pointer is set to point to a proper
+ * net_dev. This is so that drivers can use it early in case they need
+ * to send stuff around or communicate with user space.
+ */
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
+ const char *pipe_name,
+ const void *msg, size_t size,
+ gfp_t gfp_flags)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ size_t msg_size;
+ void *genl_msg;
+ struct sk_buff *skb;
+
+	/* nla_put_string() stores the trailing NUL, so account for it */
+	msg_size = nla_total_size(size)
+		+ nla_total_size(sizeof(u32))
+		+ (pipe_name ? nla_total_size(strlen(pipe_name) + 1) : 0);
+ result = -ENOMEM;
+ skb = genlmsg_new(msg_size, gfp_flags);
+ if (skb == NULL)
+ goto error_new;
+ genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
+ 0, WIMAX_GNL_OP_MSG_TO_USER);
+ if (genl_msg == NULL) {
+ dev_err(dev, "no memory to create generic netlink message\n");
+ goto error_genlmsg_put;
+ }
+ result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
+ wimax_dev->net_dev->ifindex);
+ if (result < 0) {
+ dev_err(dev, "no memory to add ifindex attribute\n");
+ goto error_nla_put;
+ }
+ if (pipe_name) {
+ result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
+ pipe_name);
+ if (result < 0) {
+ dev_err(dev, "no memory to add pipe_name attribute\n");
+ goto error_nla_put;
+ }
+ }
+ result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
+ if (result < 0) {
+ dev_err(dev, "no memory to add payload (msg %p size %zu) in "
+ "attribute: %d\n", msg, size, result);
+ goto error_nla_put;
+ }
+ genlmsg_end(skb, genl_msg);
+ return skb;
+
+error_nla_put:
+error_genlmsg_put:
+error_new:
+ nlmsg_free(skb);
+ return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_alloc);
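+
+/*
+ * Example (illustrative sketch, not part of the stack; the driver
+ * private structure and report are hypothetical): building a message
+ * in one context and delivering it later from another.
+ *
+ *	struct sk_buff *skb;
+ *
+ *	skb = wimax_msg_alloc(&priv->wimax_dev, "diagnostics",
+ *			      &report, sizeof(report), GFP_KERNEL);
+ *	if (IS_ERR(skb))
+ *		return PTR_ERR(skb);
+ *
+ * and later, e.g. from a workqueue:
+ *
+ *	wimax_msg_send(&priv->wimax_dev, skb);
+ */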
+
+
+/**
+ * wimax_msg_data_len - Return a pointer and size of a message's payload
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ * @size: Pointer to where to store the message's size
+ *
+ * Returns: the pointer to the message data, or NULL if the data
+ * attribute cannot be found.
+ */
+const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
+{
+ struct nlmsghdr *nlh = (void *) msg->head;
+ struct nlattr *nla;
+
+ nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+ WIMAX_GNL_MSG_DATA);
+ if (nla == NULL) {
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ return NULL;
+ }
+ *size = nla_len(nla);
+ return nla_data(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_data_len);
+
+
+/**
+ * wimax_msg_data - Return a pointer to a message's payload
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ */
+const void *wimax_msg_data(struct sk_buff *msg)
+{
+ struct nlmsghdr *nlh = (void *) msg->head;
+ struct nlattr *nla;
+
+ nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+ WIMAX_GNL_MSG_DATA);
+ if (nla == NULL) {
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ return NULL;
+ }
+ return nla_data(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_data);
+
+
+/**
+ * wimax_msg_len - Return a message's payload length
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ */
+ssize_t wimax_msg_len(struct sk_buff *msg)
+{
+ struct nlmsghdr *nlh = (void *) msg->head;
+ struct nlattr *nla;
+
+ nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+ WIMAX_GNL_MSG_DATA);
+ if (nla == NULL) {
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ return -EINVAL;
+ }
+ return nla_len(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_len);
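+
+/*
+ * Example (illustrative sketch): inspecting the payload of a message
+ * built with wimax_msg_alloc(), e.g. for logging before delivery.
+ *
+ *	size_t size;
+ *	const void *data;
+ *
+ *	data = wimax_msg_data_len(skb, &size);
+ *	if (data == NULL)
+ *		return -EINVAL;
+ *	pr_debug("message payload is %zu bytes\n", size);
+ */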
+
+
+/**
+ * wimax_msg_send - Send a pre-allocated message to user space
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the
+ * ownership of @skb is transferred to this function.
+ *
+ * Returns: 0 if ok, < 0 errno code on error
+ *
+ * Description:
+ *
+ * Sends a free-form message that was preallocated with
+ * wimax_msg_alloc() and filled up.
+ *
+ * Assumes that once you pass an skb to this function for sending, it
+ * owns it and will release it when done (on success).
+ *
+ * IMPORTANT:
+ *
+ * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
+ * wimax_msg_send() depends on skb->data being placed at the
+ * beginning of the user message.
+ *
+ * Unlike other WiMAX stack calls, this call can be used way early,
+ * even before wimax_dev_add() is called, as long as the
+ * wimax_dev->net_dev pointer is set to point to a proper
+ * net_dev. This is so that drivers can use it early in case they need
+ * to send stuff around or communicate with user space.
+ */
+int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
+{
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ void *msg = skb->data;
+ size_t size = skb->len;
+ might_sleep();
+
+ d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
+ d_dump(2, dev, msg, size);
+ genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL);
+ d_printf(1, dev, "CTX: genl multicast done\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wimax_msg_send);
+
+
+/**
+ * wimax_msg - Send a message to user space
+ *
+ * @wimax_dev: WiMAX device descriptor (properly referenced)
+ * @pipe_name: "named pipe" the message will be sent to
+ * @buf: pointer to the message to send.
+ * @size: size of the buffer pointed to by @buf (in bytes).
+ * @gfp_flags: flags for memory allocation.
+ *
+ * Returns: %0 if ok, negative errno code on error.
+ *
+ * Description:
+ *
+ * Sends a free-form message to user space on the device @wimax_dev.
+ *
+ * NOTES:
+ *
+ * The skb allocated internally to carry the message is owned by this
+ * function and released when done; the payload at @buf is copied, so
+ * the caller keeps ownership of @buf.
+ */
+int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
+ const void *buf, size_t size, gfp_t gfp_flags)
+{
+ int result = -ENOMEM;
+ struct sk_buff *skb;
+
+ skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
+ if (IS_ERR(skb))
+ result = PTR_ERR(skb);
+ else
+ result = wimax_msg_send(wimax_dev, skb);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wimax_msg);
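+
+/*
+ * Example (illustrative sketch; the event structure and pipe name are
+ * hypothetical): one-shot delivery of a device report to user space.
+ *
+ *	struct my_event ev = { .code = 0x32 };
+ *	int result;
+ *
+ *	result = wimax_msg(wimax_dev, "events", &ev, sizeof(ev),
+ *			   GFP_KERNEL);
+ *	if (result < 0)
+ *		dev_err(dev, "can't send event report: %d\n", result);
+ */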
+
+/*
+ * Relays a message from user space to the driver
+ *
+ * The skb is passed to the driver-specific function with the netlink
+ * and generic netlink headers already stripped.
+ *
+ * This call will block while handling/relaying the message.
+ */
+int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+ struct device *dev;
+ struct nlmsghdr *nlh = info->nlhdr;
+ char *pipe_name;
+ void *msg_buf;
+ size_t msg_len;
+
+ might_sleep();
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_MSG_FROM_USER: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ dev = wimax_dev_to_dev(wimax_dev);
+
+ /* Unpack arguments */
+ result = -EINVAL;
+ if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) {
+ dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA "
+ "attribute\n");
+ goto error_no_data;
+ }
+ msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]);
+ msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]);
+
+ if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL)
+ pipe_name = NULL;
+ else {
+ struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME];
+ size_t attr_len = nla_len(attr);
+ /* libnl-1.1 does not yet support NLA_NUL_STRING */
+ result = -ENOMEM;
+		/* kstrndup() NUL-terminates, so no manual termination */
+		pipe_name = kstrndup(nla_data(attr), attr_len, GFP_KERNEL);
+		if (pipe_name == NULL)
+			goto error_alloc;
+ }
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result == -ENOMEDIUM)
+ result = 0;
+ if (result < 0)
+ goto error_not_ready;
+ result = -ENOSYS;
+ if (wimax_dev->op_msg_from_user == NULL)
+ goto error_noop;
+
+ d_printf(1, dev,
+ "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n",
+ nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags,
+ nlh->nlmsg_seq, nlh->nlmsg_pid);
+ d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len);
+ d_dump(2, dev, msg_buf, msg_len);
+
+ result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name,
+ msg_buf, msg_len, info);
+error_noop:
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+error_alloc:
+ kfree(pipe_name);
+error_no_data:
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/op-reset.c b/drivers/staging/wimax/op-reset.c
new file mode 100644
index 000000000000..b3f000cbe112
--- /dev/null
+++ b/drivers/staging/wimax/op-reset.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Implement and export a method for resetting a WiMAX device
+ *
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements a simple synchronous call to reset a WiMAX device.
+ *
+ * Resets aim at being warm, keeping the device handles active;
+ * however, when that fails, it falls back to a cold reset (that will
+ * disconnect and reconnect the device).
+ */
+
+#include "net-wimax.h"
+#include <net/genetlink.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include <linux/export.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_reset
+#include "debug-levels.h"
+
+
+/**
+ * wimax_reset - Reset a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Returns:
+ *
+ * %0 if ok and a warm reset was done (the device still exists in
+ * the system).
+ *
+ * -%ENODEV if a cold/bus reset had to be done (device has
+ * disconnected and reconnected, so current handle is not valid
+ * any more).
+ *
+ * -%EINVAL if the device is not even registered.
+ *
+ * Any other negative error code shall be considered as
+ * non-recoverable.
+ *
+ * Description:
+ *
+ * Called when wanting to reset the device for any reason. The device
+ * is taken back to power-on status.
+ *
+ * This call blocks; on successful return, the device has completed the
+ * reset process and is ready to operate.
+ */
+int wimax_reset(struct wimax_dev *wimax_dev)
+{
+ int result = -EINVAL;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st state;
+
+ might_sleep();
+ d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+ mutex_lock(&wimax_dev->mutex);
+ dev_hold(wimax_dev->net_dev);
+ state = wimax_dev->state;
+ mutex_unlock(&wimax_dev->mutex);
+
+ if (state >= WIMAX_ST_DOWN) {
+ mutex_lock(&wimax_dev->mutex_reset);
+ result = wimax_dev->op_reset(wimax_dev);
+ mutex_unlock(&wimax_dev->mutex_reset);
+ }
+ dev_put(wimax_dev->net_dev);
+
+ d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+ return result;
+}
+EXPORT_SYMBOL(wimax_reset);
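+
+/*
+ * Example (illustrative sketch): recovering a wedged device. On
+ * -ENODEV the device went through a cold/bus reset, so the current
+ * handle is gone and must not be touched again.
+ *
+ *	result = wimax_reset(wimax_dev);
+ *	if (result == -ENODEV)
+ *		return;
+ *	if (result < 0)
+ *		dev_err(dev, "unrecoverable reset failure: %d\n", result);
+ */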
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the reset command from user space, return error code.
+ *
+ * No attributes.
+ */
+int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ /* Execute the operation and send the result back to user space */
+ result = wimax_reset(wimax_dev);
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/op-rfkill.c b/drivers/staging/wimax/op-rfkill.c
new file mode 100644
index 000000000000..78b294481a59
--- /dev/null
+++ b/drivers/staging/wimax/op-rfkill.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * RF-kill framework integration
+ *
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This integrates into the Linux Kernel rfkill subsystem so that
+ * drivers have to do only the bare minimum of work: provide a method
+ * to set the software RF-Kill switch and report changes in the
+ * software and hardware switch status.
+ *
+ * A non-polled generic rfkill device is embedded into the WiMAX
+ * subsystem's representation of a device.
+ *
+ * FIXME: Need polled support? Let drivers provide a poll routine
+ * and hand it to rfkill ops then?
+ *
+ * All a device driver has to do is, after wimax_dev_init(), call
+ * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update the
+ * initial state and then again every time it changes. See
+ * wimax.h:struct wimax_dev for more information.
+ *
+ * ROADMAP
+ *
+ * wimax_gnl_doit_rfkill() User space calling wimax_rfkill()
+ * wimax_rfkill() Kernel calling wimax_rfkill()
+ * __wimax_rf_toggle_radio()
+ *
+ * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
+ * __wimax_rf_toggle_radio()
+ *
+ * __wimax_rf_toggle_radio()
+ * wimax_dev->op_rfkill_sw_toggle() Driver backend
+ * __wimax_state_change()
+ *
+ * wimax_report_rfkill_sw() Driver reports state change
+ * __wimax_state_change()
+ *
+ * wimax_report_rfkill_hw() Driver reports state change
+ * __wimax_state_change()
+ *
+ * wimax_rfkill_add() Initialize/shutdown rfkill support
+ * wimax_rfkill_rm() [called by wimax_dev_add/rm()]
+ */
+
+#include "net-wimax.h"
+#include <net/genetlink.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include <linux/rfkill.h>
+#include <linux/export.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_rfkill
+#include "debug-levels.h"
+
+/**
+ * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on,
+ * %WIMAX_RF_OFF radio off.
+ *
+ * When the device detects a change in the state of the hardware RF
+ * switch, it must call this function to let the WiMAX kernel stack
+ * know that the state has changed so it can be properly propagated.
+ *
+ * The WiMAX stack caches the state (the driver doesn't need to). As
+ * well, as the change is propagated it will come back as a request to
+ * change the software state to mirror the hardware state.
+ *
+ * If the device doesn't have a hardware kill switch, just report
+ * it on initialization as always on (%WIMAX_RF_ON, radio on).
+ */
+void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st wimax_state;
+
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ BUG_ON(state == WIMAX_RF_QUERY);
+ BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
+
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result < 0)
+ goto error_not_ready;
+
+ if (state != wimax_dev->rf_hw) {
+ wimax_dev->rf_hw = state;
+ if (wimax_dev->rf_hw == WIMAX_RF_ON &&
+ wimax_dev->rf_sw == WIMAX_RF_ON)
+ wimax_state = WIMAX_ST_READY;
+ else
+ wimax_state = WIMAX_ST_RADIO_OFF;
+
+ result = rfkill_set_hw_state(wimax_dev->rfkill,
+ state == WIMAX_RF_OFF);
+
+ __wimax_state_change(wimax_dev, wimax_state);
+ }
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
+ wimax_dev, state, result);
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
+
+
+/**
+ * wimax_report_rfkill_sw - Reports changes in the software RF switch
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
+ * %WIMAX_RF_OFF radio off.
+ *
+ * Reports changes in the software RF switch state to the WiMAX stack.
+ *
+ * The main use is during initialization, so the driver can query the
+ * device for its current software radio kill switch state and feed it
+ * to the system.
+ *
+ * As a rule, the device should not change the software state by
+ * itself; in practice this can happen, as the device might decide to
+ * switch (in software) the radio off for different reasons.
+ */
+void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st wimax_state;
+
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ BUG_ON(state == WIMAX_RF_QUERY);
+ BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
+
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result < 0)
+ goto error_not_ready;
+
+ if (state != wimax_dev->rf_sw) {
+ wimax_dev->rf_sw = state;
+ if (wimax_dev->rf_hw == WIMAX_RF_ON &&
+ wimax_dev->rf_sw == WIMAX_RF_ON)
+ wimax_state = WIMAX_ST_READY;
+ else
+ wimax_state = WIMAX_ST_RADIO_OFF;
+ __wimax_state_change(wimax_dev, wimax_state);
+ rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
+ }
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
+ wimax_dev, state, result);
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
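+
+/*
+ * Example (illustrative sketch; the query helpers are hypothetical
+ * driver functions): feeding the stack the initial switch states
+ * right after wimax_dev_init().
+ *
+ *	wimax_report_rfkill_hw(wimax_dev, my_query_hw_switch(priv) ?
+ *			       WIMAX_RF_OFF : WIMAX_RF_ON);
+ *	wimax_report_rfkill_sw(wimax_dev, my_query_sw_switch(priv) ?
+ *			       WIMAX_RF_OFF : WIMAX_RF_ON);
+ */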
+
+
+/*
+ * Callback for the RF Kill toggle operation
+ *
+ * This function is called by:
+ *
+ * - The rfkill subsystem when the RF-Kill key is pressed in the
+ * hardware and the driver notifies through
+ * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back
+ * here so the software RF Kill switch state is changed to reflect
+ * the hardware switch state.
+ *
+ * - When the user sets the state through sysfs' rfkill/state file
+ *
+ * - When the user calls wimax_rfkill().
+ *
+ * This call blocks!
+ *
+ * WARNING! When we call rfkill_unregister(), this will be called with
+ * state 0!
+ *
+ * WARNING: wimax_dev must be locked
+ */
+static
+int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result = 0;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st wimax_state;
+
+ might_sleep();
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ if (wimax_dev->rf_sw == state)
+ goto out_no_change;
+ if (wimax_dev->op_rfkill_sw_toggle != NULL)
+ result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state);
+ else if (state == WIMAX_RF_OFF) /* No op? can't turn off */
+ result = -ENXIO;
+ else /* No op? can turn on */
+ result = 0; /* should never happen tho */
+ if (result >= 0) {
+ result = 0;
+ wimax_dev->rf_sw = state;
+ wimax_state = state == WIMAX_RF_ON ?
+ WIMAX_ST_READY : WIMAX_ST_RADIO_OFF;
+ __wimax_state_change(wimax_dev, wimax_state);
+ }
+out_no_change:
+ d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+ wimax_dev, state, result);
+ return result;
+}
+
+
+/*
+ * Translate from rfkill state to wimax state
+ *
+ * NOTE: Special state handling rules here
+ *
+ * Just pretend the call didn't happen if we are in a state where
+ * we know for sure it cannot be handled (WIMAX_ST_DOWN or
+ * __WIMAX_ST_QUIESCING). The rfkill subsystem needs this, as it
+ * runs this path when registering and unregistering.
+ *
+ * NOTE: This call will block until the operation is completed.
+ */
+static int wimax_rfkill_set_radio_block(void *data, bool blocked)
+{
+ int result;
+ struct wimax_dev *wimax_dev = data;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_rf_state rf_state;
+
+ d_fnstart(3, dev, "(wimax_dev %p blocked %u)\n", wimax_dev, blocked);
+ rf_state = WIMAX_RF_ON;
+ if (blocked)
+ rf_state = WIMAX_RF_OFF;
+ mutex_lock(&wimax_dev->mutex);
+ if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
+ result = 0;
+ else
+ result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p blocked %u) = %d\n",
+ wimax_dev, blocked, result);
+ return result;
+}
+
+static const struct rfkill_ops wimax_rfkill_ops = {
+ .set_block = wimax_rfkill_set_radio_block,
+};
+
+/**
+ * wimax_rfkill - Set the software RF switch state for a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New RF state.
+ *
+ * Returns:
+ *
+ * >= 0 toggle state if ok, < 0 errno code on error. The toggle state
+ * is returned as a bitmap, bit 0 being the hardware RF state, bit 1
+ * the software RF state.
+ *
+ * Each bit carries the corresponding enum wimax_rf_state value:
+ * %WIMAX_RF_OFF for radio off, %WIMAX_RF_ON for radio on.
+ *
+ * Description:
+ *
+ * Called when user space requests the WiMAX radio to be switched on
+ * (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With %WIMAX_RF_QUERY, just
+ * the current state is returned.
+ *
+ * NOTE:
+ *
+ * This call will block until the operation is complete.
+ */
+int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result < 0) {
+ /* While initializing, < 1.4.3 wimax-tools versions use
+ * this call to check if the device is a valid WiMAX
+ * device; so we allow it to proceed always,
+ * considering the radios are all off. */
+ if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY)
+ result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF;
+ goto error_not_ready;
+ }
+ switch (state) {
+ case WIMAX_RF_ON:
+ case WIMAX_RF_OFF:
+ result = __wimax_rf_toggle_radio(wimax_dev, state);
+ if (result < 0)
+ goto error;
+ rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
+ break;
+ case WIMAX_RF_QUERY:
+ break;
+ default:
+ result = -EINVAL;
+ goto error;
+ }
+ result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw;
+error:
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+ wimax_dev, state, result);
+ return result;
+}
+EXPORT_SYMBOL(wimax_rfkill);
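+
+/*
+ * Example (illustrative sketch): querying and decoding the toggle
+ * bitmap returned by wimax_rfkill().
+ *
+ *	result = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);
+ *	if (result < 0)
+ *		return result;
+ *	hw_state = result & 1;
+ *	sw_state = result >> 1;
+ *	radio_on = hw_state == WIMAX_RF_ON && sw_state == WIMAX_RF_ON;
+ */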
+
+
+/*
+ * Register a new WiMAX device's RF Kill support
+ *
+ * WARNING: wimax_dev->mutex must be unlocked
+ */
+int wimax_rfkill_add(struct wimax_dev *wimax_dev)
+{
+ int result;
+ struct rfkill *rfkill;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+
+ d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+ /* Initialize RF Kill */
+ result = -ENOMEM;
+ rfkill = rfkill_alloc(wimax_dev->name, dev, RFKILL_TYPE_WIMAX,
+ &wimax_rfkill_ops, wimax_dev);
+ if (rfkill == NULL)
+ goto error_rfkill_allocate;
+
+ d_printf(1, dev, "rfkill %p\n", rfkill);
+
+ wimax_dev->rfkill = rfkill;
+
+ rfkill_init_sw_state(rfkill, 1);
+ result = rfkill_register(wimax_dev->rfkill);
+ if (result < 0)
+ goto error_rfkill_register;
+
+ /* If there is no SW toggle op, SW RFKill is always on */
+ if (wimax_dev->op_rfkill_sw_toggle == NULL)
+ wimax_dev->rf_sw = WIMAX_RF_ON;
+
+ d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
+ return 0;
+
+error_rfkill_register:
+ rfkill_destroy(wimax_dev->rfkill);
+error_rfkill_allocate:
+ d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+ return result;
+}
+
+
+/*
+ * Deregister a WiMAX device's RF Kill support
+ *
+ * Ick, we can't call rfkill_free() after rfkill_unregister()...oh
+ * well.
+ *
+ * WARNING: wimax_dev->mutex must be unlocked
+ */
+void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
+{
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+ rfkill_unregister(wimax_dev->rfkill);
+ rfkill_destroy(wimax_dev->rfkill);
+ d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
+}
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the rfkill command from user space, return a combination
+ * value that describes the states of the different toggles.
+ *
+ * Only one attribute: the new state requested (on, off or no change,
+ * just query).
+ */
+
+int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+ struct device *dev;
+ enum wimax_rf_state new_state;
+
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ dev = wimax_dev_to_dev(wimax_dev);
+ result = -EINVAL;
+ if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) {
+ dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE "
+ "attribute\n");
+		goto error_no_state;
+ }
+ new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]);
+
+ /* Execute the operation and send the result back to user space */
+ result = wimax_rfkill(wimax_dev, new_state);
+error_no_state:
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/op-state-get.c b/drivers/staging/wimax/op-state-get.c
new file mode 100644
index 000000000000..c5bfbed505f5
--- /dev/null
+++ b/drivers/staging/wimax/op-state-get.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Implement and export a method for getting a WiMAX device current state
+ *
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ *
+ * Based on previous WiMAX core work by:
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+
+#include "net-wimax.h"
+#include <net/genetlink.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_state_get
+#include "debug-levels.h"
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the state get command from user space, return a combination
+ * value that describe the current state.
+ *
+ * No attributes.
+ */
+int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ /* Execute the operation and send the result back to user space */
+ result = wimax_state_get(wimax_dev);
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/stack.c b/drivers/staging/wimax/stack.c
new file mode 100644
index 000000000000..ace24a6dfd2d
--- /dev/null
+++ b/drivers/staging/wimax/stack.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Initialization, addition and removal of wimax devices
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements:
+ *
+ * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
+ * addition/registration initialize all subfields and allocate
+ * generic netlink resources for user space communication. On
+ * removal/unregistration, undo all that.
+ *
+ * - device state machine [wimax_state_change()] and support to send
+ * reports to user space when the state changes
+ * [wimax_gnl_re_state_change*()].
+ *
+ * See include/net/wimax.h for rationales and design.
+ *
+ * ROADMAP
+ *
+ * [__]wimax_state_change() Called by drivers to update device's state
+ * wimax_gnl_re_state_change_alloc()
+ * wimax_gnl_re_state_change_send()
+ *
+ * wimax_dev_init() Init a device
+ * wimax_dev_add() Register
+ * wimax_rfkill_add()
+ * wimax_gnl_add() Register all the generic netlink resources.
+ * wimax_id_table_add()
+ * wimax_dev_rm() Unregister
+ * wimax_id_table_rm()
+ * wimax_gnl_rm()
+ * wimax_rfkill_rm()
+ */
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include "linux-wimax.h"
+#include <linux/module.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE stack
+#include "debug-levels.h"
+
+static char wimax_debug_params[128];
+module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
+ 0644);
+MODULE_PARM_DESC(debug,
+ "String of space-separated NAME:VALUE pairs, where NAMEs "
+ "are the different debug submodules and VALUE are the "
+ "initial debug value to set.");
+
+/*
+ * Authoritative source for the RE_STATE_CHANGE attribute policy
+ *
+ * We don't really use it here, but /me likes to keep the definition
+ * close to where the data is generated.
+ */
+/*
+static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
+ [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
+ [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
+};
+*/
+
+
+/*
+ * Allocate a Report State Change message
+ *
+ * @header: save it, you need it for _send()
+ *
+ * Creates and fills a basic state change message; different code
+ * paths can then add more attributes to the message as needed.
+ *
+ * Use wimax_gnl_re_state_change_send() to send the returned skb.
+ *
+ * Returns: skb with the genl message if ok, IS_ERR() ptr on error
+ * with an errno code.
+ */
+static
+struct sk_buff *wimax_gnl_re_state_change_alloc(
+ struct wimax_dev *wimax_dev,
+ enum wimax_st new_state, enum wimax_st old_state,
+ void **header)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ void *data;
+ struct sk_buff *report_skb;
+
+ d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
+ wimax_dev, new_state, old_state);
+ result = -ENOMEM;
+ report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (report_skb == NULL) {
+ dev_err(dev, "RE_STCH: can't create message\n");
+ goto error_new;
+ }
+ /* FIXME: sending a group ID as the seq is wrong */
+ data = genlmsg_put(report_skb, 0, wimax_gnl_family.mcgrp_offset,
+ &wimax_gnl_family, 0, WIMAX_GNL_RE_STATE_CHANGE);
+ if (data == NULL) {
+ dev_err(dev, "RE_STCH: can't put data into message\n");
+ goto error_put;
+ }
+ *header = data;
+
+ result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
+ if (result < 0) {
+ dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
+ goto error_put;
+ }
+ result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
+ if (result < 0) {
+ dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
+ goto error_put;
+ }
+ result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
+ wimax_dev->net_dev->ifindex);
+ if (result < 0) {
+ dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
+ goto error_put;
+ }
+ d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
+ wimax_dev, new_state, old_state, report_skb);
+ return report_skb;
+
+error_put:
+ nlmsg_free(report_skb);
+error_new:
+ d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
+ wimax_dev, new_state, old_state, result);
+ return ERR_PTR(result);
+}
+
+
+/*
+ * Send a Report State Change message (as created with _alloc).
+ *
+ * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
+ * @header: as returned by wimax_gnl_re_state_change_alloc()
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * If the message is NULL, pretend it didn't happen.
+ */
+static
+int wimax_gnl_re_state_change_send(
+ struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
+ void *header)
+{
+ int result = 0;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
+ wimax_dev, report_skb);
+ if (report_skb == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+ genlmsg_end(report_skb, header);
+ genlmsg_multicast(&wimax_gnl_family, report_skb, 0, 0, GFP_KERNEL);
+out:
+ d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
+ wimax_dev, report_skb, result);
+ return result;
+}
+
+
+static
+void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
+ unsigned int allowed_states_bm)
+{
+ if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
+ pr_err("SW BUG! Forbidden state change %u -> %u\n",
+ old_state, new_state);
+ }
+}
+
+
+/*
+ * Set the current state of a WiMAX device [unlocked version of
+ * wimax_state_change(); the caller must hold wimax_dev->mutex].
+ */
+void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
+{
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st old_state = wimax_dev->state;
+ struct sk_buff *stch_skb;
+ void *header;
+
+ d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
+ wimax_dev, new_state, old_state);
+
+ if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
+ dev_err(dev, "SW BUG: requesting invalid state %u\n",
+ new_state);
+ goto out;
+ }
+ if (old_state == new_state)
+ goto out;
+	header = NULL;	/* quiet gcc's maybe-uninitialized warning */
+ stch_skb = wimax_gnl_re_state_change_alloc(
+ wimax_dev, new_state, old_state, &header);
+
+ /* Verify the state transition and do exit-from-state actions */
+ switch (old_state) {
+ case __WIMAX_ST_NULL:
+ __check_new_state(old_state, new_state,
+ 1 << WIMAX_ST_DOWN);
+ break;
+ case WIMAX_ST_DOWN:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_UNINITIALIZED
+ | 1 << WIMAX_ST_RADIO_OFF);
+ break;
+ case __WIMAX_ST_QUIESCING:
+ __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
+ break;
+ case WIMAX_ST_UNINITIALIZED:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF);
+ break;
+ case WIMAX_ST_RADIO_OFF:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_READY);
+ break;
+ case WIMAX_ST_READY:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_SCANNING
+ | 1 << WIMAX_ST_CONNECTING
+ | 1 << WIMAX_ST_CONNECTED);
+ break;
+ case WIMAX_ST_SCANNING:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_READY
+ | 1 << WIMAX_ST_CONNECTING
+ | 1 << WIMAX_ST_CONNECTED);
+ break;
+ case WIMAX_ST_CONNECTING:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_READY
+ | 1 << WIMAX_ST_SCANNING
+ | 1 << WIMAX_ST_CONNECTED);
+ break;
+ case WIMAX_ST_CONNECTED:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_READY);
+ netif_tx_disable(wimax_dev->net_dev);
+ netif_carrier_off(wimax_dev->net_dev);
+ break;
+ case __WIMAX_ST_INVALID:
+ default:
+ dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
+ wimax_dev, wimax_dev->state);
+ WARN_ON(1);
+ goto out;
+ }
+
+ /* Execute the actions of entry to the new state */
+ switch (new_state) {
+ case __WIMAX_ST_NULL:
+ dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
+ "from %u\n", wimax_dev, wimax_dev->state);
+ WARN_ON(1); /* Nobody can enter this state */
+ break;
+ case WIMAX_ST_DOWN:
+ break;
+ case __WIMAX_ST_QUIESCING:
+ break;
+ case WIMAX_ST_UNINITIALIZED:
+ break;
+ case WIMAX_ST_RADIO_OFF:
+ break;
+ case WIMAX_ST_READY:
+ break;
+ case WIMAX_ST_SCANNING:
+ break;
+ case WIMAX_ST_CONNECTING:
+ break;
+ case WIMAX_ST_CONNECTED:
+ netif_carrier_on(wimax_dev->net_dev);
+ netif_wake_queue(wimax_dev->net_dev);
+ break;
+ case __WIMAX_ST_INVALID:
+ default:
+ BUG();
+ }
+ __wimax_state_set(wimax_dev, new_state);
+ if (!IS_ERR(stch_skb))
+ wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
+out:
+ d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
+ wimax_dev, new_state, old_state);
+}
+
+
+/**
+ * wimax_state_change - Set the current state of a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor (properly referenced)
+ * @new_state: New state to switch to
+ *
+ * This implements the state changes for the wimax devices. It will
+ *
+ * - verify that the state transition is legal (for now it'll just
+ * print a warning if not) according to the table in
+ * linux/wimax.h's documentation for 'enum wimax_st'.
+ *
+ * - perform the actions needed for leaving the current state and
+ * whichever are needed for entering the new state.
+ *
+ * - issue a report to user space indicating the new state (and an
+ * optional payload with information about the new state).
+ *
+ * NOTE: This function takes @wimax_dev's mutex; the caller must not
+ * hold it.
+ */
+void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
+{
+ /*
+ * A driver cannot take the wimax_dev out of the
+ * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
+ * the wimax_dev's state is still NULL, we ignore any request
+	 * to change its state because it means it hasn't yet been
+	 * registered.
+ *
+ * There is no need to complain about it, as routines that
+ * call this might be shared from different code paths that
+ * are called before or after wimax_dev_add() has done its
+ * job.
+ */
+ mutex_lock(&wimax_dev->mutex);
+ if (wimax_dev->state > __WIMAX_ST_NULL)
+ __wimax_state_change(wimax_dev, new_state);
+ mutex_unlock(&wimax_dev->mutex);
+}
+EXPORT_SYMBOL_GPL(wimax_state_change);
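+
+/*
+ * Example (illustrative sketch; my_do_connect() is a hypothetical
+ * driver function): driving the state machine from a connect path.
+ *
+ *	wimax_state_change(wimax_dev, WIMAX_ST_CONNECTING);
+ *	result = my_do_connect(priv);
+ *	wimax_state_change(wimax_dev, result < 0 ?
+ *			   WIMAX_ST_READY : WIMAX_ST_CONNECTED);
+ */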
+
+
+/**
+ * wimax_state_get - Return the current state of a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Returns: Current state of the device according to its driver.
+ */
+enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
+{
+ enum wimax_st state;
+ mutex_lock(&wimax_dev->mutex);
+ state = wimax_dev->state;
+ mutex_unlock(&wimax_dev->mutex);
+ return state;
+}
+EXPORT_SYMBOL_GPL(wimax_state_get);
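+
+/*
+ * Example (illustrative sketch): bailing out of a housekeeping path
+ * when the device is not at least ready.
+ *
+ *	if (wimax_state_get(wimax_dev) < WIMAX_ST_READY)
+ *		return;
+ */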
+
+
+/**
+ * wimax_dev_init - initialize a newly allocated instance
+ *
+ * @wimax_dev: WiMAX device descriptor to initialize.
+ *
+ * Initializes fields of a freshly allocated @wimax_dev instance. This
+ * function assumes that after allocation, the memory occupied by
+ * @wimax_dev was zeroed.
+ */
+void wimax_dev_init(struct wimax_dev *wimax_dev)
+{
+ INIT_LIST_HEAD(&wimax_dev->id_table_node);
+ __wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
+ mutex_init(&wimax_dev->mutex);
+ mutex_init(&wimax_dev->mutex_reset);
+}
+EXPORT_SYMBOL_GPL(wimax_dev_init);
+
+/*
+ * There are multiple enums reusing the same values, adding
+ * others is only possible if they use a compatible policy.
+ */
+static const struct nla_policy wimax_gnl_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+ /*
+ * WIMAX_GNL_RESET_IFIDX, WIMAX_GNL_RFKILL_IFIDX,
+ * WIMAX_GNL_STGET_IFIDX, WIMAX_GNL_MSG_IFIDX
+ */
+ [1] = { .type = NLA_U32, },
+ /*
+ * WIMAX_GNL_RFKILL_STATE, WIMAX_GNL_MSG_PIPE_NAME
+ */
+ [2] = { .type = NLA_U32, }, /* enum wimax_rf_state */
+ /*
+ * WIMAX_GNL_MSG_DATA
+ */
+ [3] = { .type = NLA_UNSPEC, }, /* libnl doesn't grok BINARY yet */
+};
+
+static const struct genl_small_ops wimax_gnl_ops[] = {
+ {
+ .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_msg_from_user,
+ },
+ {
+ .cmd = WIMAX_GNL_OP_RESET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_reset,
+ },
+ {
+ .cmd = WIMAX_GNL_OP_RFKILL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_rfkill,
+ },
+ {
+ .cmd = WIMAX_GNL_OP_STATE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_state_get,
+ },
+};
+
+
+static
+size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
+ unsigned char *addr, size_t addr_len)
+{
+ unsigned int cnt, total;
+
+ for (total = cnt = 0; cnt < addr_len; cnt++)
+ total += scnprintf(addr_str + total, addr_str_size - total,
+ "%02x%c", addr[cnt],
+ cnt == addr_len - 1 ? '\0' : ':');
+ return total;
+}
+
+
+/**
+ * wimax_dev_add - Register a new WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
+ * priv data). You must have called wimax_dev_init() on it before.
+ *
+ * @net_dev: net device the @wimax_dev is associated with. The
+ * function expects SET_NETDEV_DEV() and register_netdev() to have
+ * been called on it already.
+ *
+ * Registers the new WiMAX device, sets up the user-kernel control
+ * interface (generic netlink) and common WiMAX infrastructure.
+ *
+ * Note that the parts that allow interaction with user space are set
+ * up at the very end, when the rest is in place, as once that
+ * happens, the driver might get user space control requests via
+ * netlink or from debugfs that might translate into calls into
+ * wimax_dev->op_*().
+ */
+int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
+{
+ int result;
+ struct device *dev = net_dev->dev.parent;
+ char addr_str[32];
+
+ d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
+
+ /* Do the RFKILL setup before locking, as RFKILL will call
+ * into our functions.
+ */
+ wimax_dev->net_dev = net_dev;
+ result = wimax_rfkill_add(wimax_dev);
+ if (result < 0)
+ goto error_rfkill_add;
+
+ /* Set up user-space interaction */
+ mutex_lock(&wimax_dev->mutex);
+ wimax_id_table_add(wimax_dev);
+ wimax_debugfs_add(wimax_dev);
+
+ __wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
+ mutex_unlock(&wimax_dev->mutex);
+
+ wimax_addr_scnprint(addr_str, sizeof(addr_str),
+ net_dev->dev_addr, net_dev->addr_len);
+ dev_err(dev, "WiMAX interface %s (%s) ready\n",
+ net_dev->name, addr_str);
+ d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
+ return 0;
+
+error_rfkill_add:
+ d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
+ wimax_dev, net_dev, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wimax_dev_add);
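+
+/*
+ * Example (illustrative sketch; the private structure and callback
+ * are hypothetical): typical driver registration order.
+ *
+ *	wimax_dev_init(&priv->wimax_dev);
+ *	priv->wimax_dev.op_msg_from_user = my_op_msg_from_user;
+ *	result = register_netdev(net_dev);
+ *	if (result < 0)
+ *		goto error_register_netdev;
+ *	result = wimax_dev_add(&priv->wimax_dev, net_dev);
+ *	if (result < 0)
+ *		goto error_wimax_dev_add;
+ */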
+
+
+/**
+ * wimax_dev_rm - Unregister an existing WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Unregisters a WiMAX device previously registered with
+ * wimax_dev_add().
+ *
+ * IMPORTANT! Must call before calling unregister_netdev().
+ *
+ * After this function returns, you will not get any more user space
+ * control requests (via netlink or debugfs) and thus no more calls to
+ * wimax_dev->op_*().
+ *
+ * Reentrancy control is ensured by setting the state to
+ * %__WIMAX_ST_QUIESCING. rfkill operations coming through
+ * wimax_*rfkill*() will be stopped by the quiescing state; ops coming
+ * from the rfkill subsystem will be stopped by the support being
+ * removed by wimax_rfkill_rm().
+ */
+void wimax_dev_rm(struct wimax_dev *wimax_dev)
+{
+ d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+
+ mutex_lock(&wimax_dev->mutex);
+ __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+ wimax_debugfs_rm(wimax_dev);
+ wimax_id_table_rm(wimax_dev);
+ __wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
+ mutex_unlock(&wimax_dev->mutex);
+ wimax_rfkill_rm(wimax_dev);
+ d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
+}
+EXPORT_SYMBOL_GPL(wimax_dev_rm);
+
+
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(id_table),
+ D_SUBMODULE_DEFINE(op_msg),
+ D_SUBMODULE_DEFINE(op_reset),
+ D_SUBMODULE_DEFINE(op_rfkill),
+ D_SUBMODULE_DEFINE(op_state_get),
+ D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
+static const struct genl_multicast_group wimax_gnl_mcgrps[] = {
+ { .name = "msg", },
+};
+
+struct genl_family wimax_gnl_family __ro_after_init = {
+ .name = "WiMAX",
+ .version = WIMAX_GNL_VERSION,
+ .hdrsize = 0,
+ .maxattr = WIMAX_GNL_ATTR_MAX,
+ .policy = wimax_gnl_policy,
+ .module = THIS_MODULE,
+ .small_ops = wimax_gnl_ops,
+ .n_small_ops = ARRAY_SIZE(wimax_gnl_ops),
+ .mcgrps = wimax_gnl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(wimax_gnl_mcgrps),
+};
+
+
+
+/* Initialize the wimax stack */
+static
+int __init wimax_subsys_init(void)
+{
+ int result;
+
+ d_fnstart(4, NULL, "()\n");
+ d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
+ "wimax.debug");
+
+ result = genl_register_family(&wimax_gnl_family);
+ if (unlikely(result < 0)) {
+ pr_err("cannot register generic netlink family: %d\n", result);
+ goto error_register_family;
+ }
+
+ d_fnend(4, NULL, "() = 0\n");
+ return 0;
+
+error_register_family:
+ d_fnend(4, NULL, "() = %d\n", result);
+ return result;
+}
+module_init(wimax_subsys_init);
+
+
+/* Shutdown the wimax stack */
+static
+void __exit wimax_subsys_exit(void)
+{
+ wimax_id_table_release();
+ genl_unregister_family(&wimax_gnl_family);
+}
+module_exit(wimax_subsys_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Linux WiMAX stack");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wimax/wimax-internal.h b/drivers/staging/wimax/wimax-internal.h
new file mode 100644
index 000000000000..a6b6990642a1
--- /dev/null
+++ b/drivers/staging/wimax/wimax-internal.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX
+ * Internal API for kernel space WiMAX stack
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This header file is for declarations and definitions internal to
+ * the WiMAX stack. For public APIs and documentation, see
+ * include/net/wimax.h and include/linux/wimax.h.
+ */
+
+#ifndef __WIMAX_INTERNAL_H__
+#define __WIMAX_INTERNAL_H__
+#ifdef __KERNEL__
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include "net-wimax.h"
+
+
+/*
+ * Decide if a (locked) device is ready for use
+ *
+ * Before using the device structure, it must be locked
+ * (wimax_dev->mutex). As well, most operations need to call this
+ * function to check if the state is the right one.
+ *
+ * An error value will be returned if the state is not the right
+ * one. In that case, the caller should not attempt to use the device
+ * and just unlock it.
+ */
+static inline __must_check
+int wimax_dev_is_ready(struct wimax_dev *wimax_dev)
+{
+ if (wimax_dev->state == __WIMAX_ST_NULL)
+ return -EINVAL; /* Device is not even registered! */
+ if (wimax_dev->state == WIMAX_ST_DOWN)
+ return -ENOMEDIUM;
+ if (wimax_dev->state == __WIMAX_ST_QUIESCING)
+ return -ESHUTDOWN;
+ return 0;
+}
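+
+/*
+ * Example (illustrative sketch): the canonical locked-check pattern
+ * used by the op_*() entry points in this stack.
+ *
+ *	mutex_lock(&wimax_dev->mutex);
+ *	result = wimax_dev_is_ready(wimax_dev);
+ *	if (result < 0)
+ *		goto error_not_ready;
+ *	(... do the operation ...)
+ * error_not_ready:
+ *	mutex_unlock(&wimax_dev->mutex);
+ */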
+
+
+static inline
+void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
+{
+ wimax_dev->state = state;
+}
+void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+
+#ifdef CONFIG_DEBUG_FS
+void wimax_debugfs_add(struct wimax_dev *);
+void wimax_debugfs_rm(struct wimax_dev *);
+#else
+static inline void wimax_debugfs_add(struct wimax_dev *wimax_dev) {}
+static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
+#endif
+
+void wimax_id_table_add(struct wimax_dev *);
+struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+void wimax_id_table_rm(struct wimax_dev *);
+void wimax_id_table_release(void);
+
+int wimax_rfkill_add(struct wimax_dev *);
+void wimax_rfkill_rm(struct wimax_dev *);
+
+/* generic netlink */
+extern struct genl_family wimax_gnl_family;
+
+/* ops */
+int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info);
+int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info);
+int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info);
+int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info);
+
+#endif /* #ifdef __KERNEL__ */
+#endif /* #ifndef __WIMAX_INTERNAL_H__ */